diff --git a/AI/caffe/1.0/24.03-lts-sp1/Dockerfile b/AI/caffe/1.0/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..a56a6530269e85593c1f52be5ae0e385996d7846 --- /dev/null +++ b/AI/caffe/1.0/24.03-lts-sp1/Dockerfile @@ -0,0 +1,59 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG VERSION=1.0 + +RUN yum install -y \ + make \ + gcc \ + gcc-c++ \ + cmake \ + git \ + wget \ + boost \ + gflags \ + glog-devel \ + hdf5-devel \ + leveldb-devel \ + lmdb-devel \ + protobuf-devel \ + snappy-devel \ + python-pandas \ + boost-devel \ + python3-devel \ + opencv \ + openblas-devel \ + patch \ + python-pip \ + doxygen \ + ghostscript \ + texlive && \ + yum clean all + +ENV CAFFE_ROOT=/opt/caffe +WORKDIR $CAFFE_ROOT + +RUN git clone -b ${VERSION} --depth 1 https://github.com/BVLC/caffe.git . && \ + pip install --upgrade pip && \ + cd python && \ + for req in $(cat requirements.txt) pydot; do pip install $req; done && \ + ln -s /usr/lib64/libboost_python311.so.1.83.0 /usr/lib64/libboost_python.so && \ + ln -s /usr/bin/python3 /usr/bin/python + +COPY opencv.patch $CAFFE_ROOT/ +COPY protobuf.patch $CAFFE_ROOT/ +COPY Makefile.patch $CAFFE_ROOT/ +RUN cd $CAFFE_ROOT && \ + patch -Np1 < Makefile.patch && \ + patch -Np1 < opencv.patch && \ + patch -Np1 < protobuf.patch && \ + mkdir build && cd build && \ + cmake -DCPU_ONLY=1 .. && \ + make -j"$(nproc)" + +ENV PYCAFFE_ROOT $CAFFE_ROOT/python +ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH +ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH +RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig + +WORKDIR /workspace diff --git a/AI/caffe/1.0/24.03-lts-sp1/Makefile.patch b/AI/caffe/1.0/24.03-lts-sp1/Makefile.patch new file mode 100644 index 0000000000000000000000000000000000000000..73eaaf7634b405893cbd0acacd137b185d65b57d --- /dev/null +++ b/AI/caffe/1.0/24.03-lts-sp1/Makefile.patch @@ -0,0 +1,26 @@ +diff --git a/Makefile b/Makefile +index 4d324160..fc3f66c5 100644 +--- a/Makefile ++++ b/Makefile +@@ -365,7 +365,7 @@ ifeq ($(WITH_PYTHON_LAYER), 1) + endif + + # BLAS configuration (default = ATLAS) +-BLAS ?= atlas ++BLAS ?= open + ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt +diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake +index 4a5bac47..231d88ee 100644 +--- a/cmake/Dependencies.cmake ++++ b/cmake/Dependencies.cmake +@@ -106,7 +106,7 @@ endif() + + # ---[ BLAS + if(NOT APPLE) +- set(BLAS "Atlas" CACHE STRING "Selected BLAS library") ++ set(BLAS "Open" CACHE STRING "Selected BLAS library") + set_property(CACHE BLAS PROPERTY STRINGS "Atlas;Open;MKL") + + if(BLAS STREQUAL "Atlas" OR BLAS STREQUAL "atlas") diff --git a/AI/caffe/1.0/24.03-lts-sp1/opencv.patch b/AI/caffe/1.0/24.03-lts-sp1/opencv.patch new file mode 100644 index 0000000000000000000000000000000000000000..d3eec0139fefa2e79e937acfa2e65465d04112cd --- /dev/null +++ b/AI/caffe/1.0/24.03-lts-sp1/opencv.patch @@ -0,0 +1,116 @@ +From 7f503bd9a19758a173064e299ab9d4cac65ed60f Mon Sep 17 00:00:00 2001 +From: Daniel Bermond +Date: Mon, 26 Nov 2018 13:11:23 +0000 +Subject: [PATCH] Fix build with OpenCV 4.0 + +--- + Makefile | 16 ++++++++++++++-- + Makefile.config.example | 2 +- + src/caffe/layers/window_data_layer.cpp | 2 +- + src/caffe/test/test_io.cpp | 4 ++-- + src/caffe/util/io.cpp | 8 ++++---- + 5 files changed, 22 insertions(+), 10 deletions(-) + +diff --git a/Makefile b/Makefile +index b7660e852d6..95f3443adab 100644 +--- a/Makefile ++++ b/Makefile +@@ -200,9 +200,17 @@ endif + ifeq ($(USE_OPENCV), 1) + LIBRARIES 
+= opencv_core opencv_highgui opencv_imgproc + +- ifeq ($(OPENCV_VERSION), 3) ++ ifeq ($(OPENCV_VERSION), $(filter $(OPENCV_VERSION), 3 4)) + LIBRARIES += opencv_imgcodecs + endif ++ ifeq ($(OPENCV_VERSION), 4) ++ ifeq ($(USE_PKG_CONFIG), 1) ++ INCLUDE_DIRS += $(shell pkg-config opencv4 --cflags-only-I | sed 's/-I//g') ++ else ++ INCLUDE_DIRS += /usr/include/opencv4 /usr/local/include/opencv4 ++ INCLUDE_DIRS += /usr/include/opencv4/opencv /usr/local/include/opencv4/opencv ++ endif ++ endif + + endif + PYTHON_LIBRARIES ?= boost_python python2.7 +@@ -429,7 +437,11 @@ LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + + USE_PKG_CONFIG ?= 0 + ifeq ($(USE_PKG_CONFIG), 1) +- PKG_CONFIG := $(shell pkg-config opencv --libs) ++ ifeq ($(OPENCV_VERSION), 4) ++ PKG_CONFIG := $(shell pkg-config opencv4 --libs) ++ else ++ PKG_CONFIG := $(shell pkg-config opencv --libs) ++ endif + else + PKG_CONFIG := + endif +diff --git a/Makefile.config.example b/Makefile.config.example +index 24ca632783a..24802e91534 100644 +--- a/Makefile.config.example ++++ b/Makefile.config.example +@@ -19,7 +19,7 @@ + # possibility of simultaneous read and write + # ALLOW_LMDB_NOLOCK := 1 + +-# Uncomment if you're using OpenCV 3 ++# Uncomment and set accordingly if you're using OpenCV 3/4 + # OPENCV_VERSION := 3 + + # To customize your choice of compiler, uncomment and set the following. +diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp +index 1bf3760e9fd..f41169debe4 100644 +--- a/src/caffe/layers/window_data_layer.cpp ++++ b/src/caffe/layers/window_data_layer.cpp +@@ -290,7 +290,7 @@ void WindowDataLayer::load_batch(Batch* batch) { + image_database_cache_[window[WindowDataLayer::IMAGE_INDEX]]; + cv_img = DecodeDatumToCVMat(image_cached.second, true); + } else { +- cv_img = cv::imread(image.first, CV_LOAD_IMAGE_COLOR); ++ cv_img = cv::imread(image.first, cv::IMREAD_COLOR); + if (!cv_img.data) { + LOG(ERROR) << "Could not open or find file " << image.first; + return; +diff --git a/src/caffe/test/test_io.cpp b/src/caffe/test/test_io.cpp +index c2c919e90dc..b80df287fba 100644 +--- a/src/caffe/test/test_io.cpp ++++ b/src/caffe/test/test_io.cpp +@@ -20,8 +20,8 @@ class IOTest : public ::testing::Test {}; + bool ReadImageToDatumReference(const string& filename, const int label, + const int height, const int width, const bool is_color, Datum* datum) { + cv::Mat cv_img; +- int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : +- CV_LOAD_IMAGE_GRAYSCALE); ++ int cv_read_flag = (is_color ? cv::IMREAD_COLOR : ++ cv::IMREAD_GRAYSCALE); + + cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag); + if (!cv_img_origin.data) { +diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp +index 5295d9dddb9..1f9167a114f 100644 +--- a/src/caffe/util/io.cpp ++++ b/src/caffe/util/io.cpp +@@ -73,8 +73,8 @@ void WriteProtoToBinaryFile(const Message& proto, const char* filename) { + cv::Mat ReadImageToCVMat(const string& filename, + const int height, const int width, const bool is_color) { + cv::Mat cv_img; +- int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : +- CV_LOAD_IMAGE_GRAYSCALE); ++ int cv_read_flag = (is_color ? 
cv::IMREAD_COLOR : ++ cv::IMREAD_GRAYSCALE); + cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag); + if (!cv_img_origin.data) { + LOG(ERROR) << "Could not open or find file " << filename; +@@ -179,8 +179,8 @@ cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color) { + CHECK(datum.encoded()) << "Datum not encoded"; + const string& data = datum.data(); + std::vector vec_data(data.c_str(), data.c_str() + data.size()); +- int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : +- CV_LOAD_IMAGE_GRAYSCALE); ++ int cv_read_flag = (is_color ? cv::IMREAD_COLOR : ++ cv::IMREAD_GRAYSCALE); + cv_img = cv::imdecode(vec_data, cv_read_flag); + if (!cv_img.data) { + LOG(ERROR) << "Could not decode datum "; \ No newline at end of file diff --git a/AI/caffe/1.0/24.03-lts-sp1/protobuf.patch b/AI/caffe/1.0/24.03-lts-sp1/protobuf.patch new file mode 100644 index 0000000000000000000000000000000000000000..c17069889ce1b8b69f70f6255b5e6b951ef82281 --- /dev/null +++ b/AI/caffe/1.0/24.03-lts-sp1/protobuf.patch @@ -0,0 +1,22 @@ +From 1b317bab3f6413a1b5d87c9d3a300d785a4173f9 Mon Sep 17 00:00:00 2001 +From: kr0p07k1n <91276442+kr0p07k1n@users.noreply.github.com> +Date: Sun, 28 Nov 2021 19:36:04 -0500 +Subject: [PATCH] use one parameter when calling SetTotalBytesLimit + +--- + src/caffe/util/io.cpp | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp +index 5295d9dddb9..f71553c9936 100644 +--- a/src/caffe/util/io.cpp ++++ b/src/caffe/util/io.cpp +@@ -54,7 +54,7 @@ bool ReadProtoFromBinaryFile(const char* filename, Message* proto) { + CHECK_NE(fd, -1) << "File not found: " << filename; + ZeroCopyInputStream* raw_input = new FileInputStream(fd); + CodedInputStream* coded_input = new CodedInputStream(raw_input); +- coded_input->SetTotalBytesLimit(kProtoReadBytesLimit, 536870912); ++ coded_input->SetTotalBytesLimit(kProtoReadBytesLimit); + + bool success = proto->ParseFromCodedStream(coded_input); + \ No newline at end of file diff --git a/AI/caffe/meta.yml b/AI/caffe/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..c316d6f23480dbe3baf50a39cd27a2f20254006c --- /dev/null +++ b/AI/caffe/meta.yml @@ -0,0 +1,2 @@ +1.0-oe2403sp1: + path: 1.0/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/AI/image-list.yml b/AI/image-list.yml index 37ba34e6e3c38d4ee5d59228c29b3d00b3691373..03abcd39802a8970c5cc9aa706e20be8881244ef 100644 --- a/AI/image-list.yml +++ b/AI/image-list.yml @@ -47,3 +47,4 @@ images: vllm-ascend: vllm-ascend langchain: langchain open-webui: open-webui + caffe: caffe \ No newline at end of file diff --git a/Bigdata/consul/1.20.5/24.03-lts-sp1/Dockerfile b/Bigdata/consul/1.20.5/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3d2a52409f67fca6df7e1ba54a4426a065ee22cb --- /dev/null +++ b/Bigdata/consul/1.20.5/24.03-lts-sp1/Dockerfile @@ -0,0 +1,19 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG VERSION=1.20.5 + +RUN yum install -y golang && \ + yum clean all + +RUN curl -fSL -o consul.tar.gz https://github.com/hashicorp/consul/archive/refs/tags/v${VERSION}.tar.gz && \ + tar -zxvf consul.tar.gz && \ + cd consul-${VERSION} && \ + go build && \ + cp consul /usr/bin/ && \ + cd .. 
&& \ + rm -rf consul-${VERSION} consul.tar.gz + +WORKDIR /usr/bin +EXPOSE 8500 8600 +ENTRYPOINT ["consul"] diff --git a/Bigdata/consul/meta.yml b/Bigdata/consul/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..e6b12ed3be124cca5cd9351eeca53ec560a82488 --- /dev/null +++ b/Bigdata/consul/meta.yml @@ -0,0 +1,2 @@ +1.20.5-oe2403sp1: + path: 1.20.5/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Bigdata/doris/2.1.9/24.03-lts-sp1/Dockerfile b/Bigdata/doris/2.1.9/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..ff409ad4e57678798b92d6817c79f8e19270c7f6 --- /dev/null +++ b/Bigdata/doris/2.1.9/24.03-lts-sp1/Dockerfile @@ -0,0 +1,14 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG VERSION=2.1.9 +ARG TARGETARCH +ARG BUILDARCH +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + BUILDARCH="x64"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + BUILDARCH="arm64"; \ + fi; \ + curl -fSL -o doris.tar.gz https://apache-doris-releases.oss-accelerate.aliyuncs.com/apache-doris-${VERSION}-bin-${BUILDARCH}.tar.gz && \ + mkdir -p /usr/local/bin && \ + tar -zxf doris.tar.gz -C /usr/local/ --strip-components=1 diff --git a/Bigdata/doris/meta.yml b/Bigdata/doris/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..3a87c12e3ae23454ba023bc7759ed23166449e87 --- /dev/null +++ b/Bigdata/doris/meta.yml @@ -0,0 +1,2 @@ +2.1.9-oe2403sp1: + path: 2.1.9/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/Dockerfile b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..6050fcc56a53b98a6f3c9d305ac5c6dd01ce6349 --- /dev/null +++ b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/Dockerfile @@ -0,0 +1,69 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} AS builder + +ARG TARGETARCH +ARG BUILDARCH +ARG VERSION=8.17.3 +ARG TINI_VERSION=0.19.0 + +RUN curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-${TARGETARCH} ; \ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-${TARGETARCH}.sha256sum ; \ + sha256sum -c tini-${TARGETARCH}.sha256sum ; \ + rm tini-${TARGETARCH}.sha256sum ; \ + mv tini-${TARGETARCH} /bin/tini ; \ + chmod 0555 /bin/tini + +RUN mkdir /usr/share/elasticsearch +WORKDIR /usr/share/elasticsearch + +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + BUILDARCH="x86_64"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + BUILDARCH="aarch64"; \ + fi; \ + curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-${VERSION}-linux-${BUILDARCH}.tar.gz + +RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 + +COPY config/elasticsearch.yml config/ +COPY config/log4j2.properties config/log4j2.docker.properties + +RUN yum install -y findutils +RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \ + mkdir data && \ + mv config/log4j2.properties config/log4j2.file.properties && \ + mv config/log4j2.docker.properties config/log4j2.properties && \ + find . -type d -exec chmod 0555 {} + && \ + find .
-type f -exec chmod 0444 {} + && \ + chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \ + chmod 0775 bin config config/jvm.options.d data logs plugins && \ + find config -type f -exec chmod 0664 {} + + + +FROM ${BASE} + +RUN yum install -y shadow findutils nc p11-kit unzip zip && yum clean all +RUN groupadd -g 1000 elasticsearch && \ + useradd -u 1000 -g 1000 -d /usr/share/elasticsearch -m elasticsearch && \ + usermod -aG root elasticsearch && \ + chown -R 0:0 /usr/share/elasticsearch + +ENV ELASTIC_CONTAINER true +WORKDIR /usr/share/elasticsearch +COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch +COPY --from=builder --chown=0:0 /bin/tini /bin/tini +ENV PATH /usr/share/elasticsearch/bin:$PATH +ENV SHELL /bin/bash +COPY entrypoint.sh /usr/local/bin/entrypoint.sh + +RUN chmod g=u /etc/passwd && \ + chmod 0555 /usr/local/bin/entrypoint.sh && \ + find / -xdev -perm -4000 -exec chmod ug-s {} + && \ + chmod 0775 /usr/share/elasticsearch && \ + chown elasticsearch bin config config/jvm.options.d data logs plugins + +EXPOSE 9200 9300 + +ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/entrypoint.sh"] +CMD ["eswrapper"] +USER 1000:0 \ No newline at end of file diff --git a/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/config/elasticsearch.yml b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/config/elasticsearch.yml new file mode 100644 index 0000000000000000000000000000000000000000..43e858ee4792f124ad07baa380548c3b6ae01e18 --- /dev/null +++ b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/config/elasticsearch.yml @@ -0,0 +1,2 @@ +cluster.name: "docker-cluster" +network.host: 0.0.0.0 \ No newline at end of file diff --git a/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/config/log4j2.properties b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/config/log4j2.properties new file mode 100644 index 0000000000000000000000000000000000000000..c0d67c8e81403b8a31201ec3ef650775074bb440 --- /dev/null +++ b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/config/log4j2.properties @@ -0,0 +1,193 @@ +status = error + +######## Server JSON ############################ +appender.rolling.type = Console +appender.rolling.name = rolling +appender.rolling.layout.type = ECSJsonLayout +appender.rolling.layout.dataset = elasticsearch.server + +################################################ + +################################################ + +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling + +######## Deprecation JSON ####################### +appender.deprecation_rolling.type = Console +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.layout.type = ECSJsonLayout +# Intentionally follows a different pattern to above +appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch +appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter + +appender.header_warning.type = HeaderWarningAppender +appender.header_warning.name = header_warning +################################################# + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.header_warning.ref = header_warning +logger.deprecation.additivity = false + +######## Search slowlog JSON #################### +appender.index_search_slowlog_rolling.type = Console +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type = 
ECSJsonLayout +appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog + +################################################# + +################################################# +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +######## Indexing slowlog JSON ################## +appender.index_indexing_slowlog_rolling.type = Console +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog + +################################################# + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false + +logger.org_apache_pdfbox.name = org.apache.pdfbox +logger.org_apache_pdfbox.level = off + +logger.org_apache_poi.name = org.apache.poi +logger.org_apache_poi.level = off + +logger.org_apache_fontbox.name = org.apache.fontbox +logger.org_apache_fontbox.level = off + +logger.org_apache_xmlbeans.name = org.apache.xmlbeans +logger.org_apache_xmlbeans.level = off + +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error + +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error + +appender.audit_rolling.type = Console +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = {\ + "type":"audit", \ + "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ + %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ + %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ + %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ + %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ + %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ + %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ + %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ + %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ + %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ + %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ + %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ + %varsNotEmpty{, 
"user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.roles":%map{user.roles}}\ + %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ + %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ + %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\ + %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ + %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ + %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ + %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ + %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ + %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ + %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ + %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ + %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ + %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ + %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ + %varsNotEmpty{, "indices":%map{indices}}\ + %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ + %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ + %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ + %varsNotEmpty{, "put":%map{put}}\ + %varsNotEmpty{, "delete":%map{delete}}\ + %varsNotEmpty{, "change":%map{change}}\ + %varsNotEmpty{, "create":%map{create}}\ + %varsNotEmpty{, "invalidate":%map{invalidate}}\ + }%n +# "node.name" node name from the `elasticsearch.yml` settings +# "node.id" node id which should not change between cluster restarts +# "host.name" unresolved hostname of the local node +# "host.ip" the local bound ip (i.e. the ip listening for connections) +# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) +# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. +# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" +# "user.name" the subject name as authenticated by a realm +# "user.run_by.name" the original authenticated subject name that is impersonating another one. +# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. 
+# "user.realm" the name of the realm that authenticated "user.name" +# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain +# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") +# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain +# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from +# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain +# "user.roles" the roles array of the user; these are the roles that are granting privileges +# "apikey.id" this field is present if and only if the "authentication.type" is "api_key" +# "apikey.name" this field is present if and only if the "authentication.type" is "api_key" +# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token +# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token +# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster +# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" +# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node +# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated +# "realm_domain" if "realm" is under a domain, this is the name of the domain +# "url.path" the URI component between the port and the query string; it is percent (URL) encoded +# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded +# "request.method" the method of the HTTP request, i.e. 
one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT +# "request.body" the content of the request body entity, JSON escaped +# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request +# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) +# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) +# "indices" the array of indices that the "action" is acting upon +# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "trace_id" an identifier conveyed by the part of "traceparent" request header +# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) +# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event +# "rule" name of the applied rule if the "origin.type" is "ip_filter" +# the "put", "delete", "change", "create", "invalidate" fields are only present +# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + +logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal \ No newline at end of file diff --git a/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/entrypoint.sh b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..83eb05251f3c65c1f13c9f64cbb8e765b35d9490 --- /dev/null +++ b/Bigdata/elasticsearch/8.17.3/24.03-lts-sp1/entrypoint.sh @@ -0,0 +1,84 @@ +#!/bin/bash +set -e + +# Files created by Elasticsearch should always be group writable too +umask 0002 + +# Allow user specify custom CMD, maybe bin/elasticsearch itself +# for example to directly specify `-E` style parameters for elasticsearch on k8s +# or simply to run /bin/bash to check the image +if [[ "$1" == "eswrapper" || $(basename "$1") == "elasticsearch" ]]; then + # Rewrite CMD args to remove the explicit command, + # so that we are backwards compatible with the docs + # from the previous Elasticsearch versions < 6 + # and configuration option: + # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink + # Without this, user could specify `elasticsearch -E x.y=z` but + # `bin/elasticsearch -E x.y=z` would not work. In any case, + # we want to continue through this script, and not exec early. + set -- "${@:2}" +else + # Run whatever command the user wanted + exec "$@" +fi + +# Allow environment variables to be set by creating a file with the +# contents, and setting an environment variable with the suffix _FILE to +# point to it. 
This can be used to provide secrets to a container, without +# the values being specified explicitly when running the container. +# +# This is also sourced in elasticsearch-env, and is only needed here +# as well because we use ELASTIC_PASSWORD below. Sourcing this script +# is idempotent. +source /usr/share/elasticsearch/bin/elasticsearch-env-from-file + +if [[ -f bin/elasticsearch-users ]]; then + # Check for the ELASTIC_PASSWORD environment variable to set the + # bootstrap password for Security. + # + # This is only required for the first node in a cluster with Security + # enabled, but we have no way of knowing which node we are yet. We'll just + # honor the variable if it's present. + if [[ -n "$ELASTIC_PASSWORD" ]]; then + [[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (elasticsearch-keystore create) + if ! (elasticsearch-keystore has-passwd --silent) ; then + # keystore is unencrypted + if ! (elasticsearch-keystore list | grep -q '^bootstrap.password$'); then + (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password') + fi + else + # keystore requires password + if ! (echo "$KEYSTORE_PASSWORD" \ + | elasticsearch-keystore list | grep -q '^bootstrap.password$') ; then + COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$ELASTIC_PASSWORD")" + (echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password') + fi + fi + fi +fi + +if [[ -n "$ES_LOG_STYLE" ]]; then + case "$ES_LOG_STYLE" in + console) + # This is the default. Nothing to do. + ;; + file) + # Overwrite the default config with the stack config. Do this as a + # copy, not a move, in case the container is restarted. + cp -f /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties + ;; + *) + echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. 
Expected [console] or [file]" >&2 + exit 1 ;; + esac +fi + +if [[ -n "$ENROLLMENT_TOKEN" ]]; then + POSITIONAL_PARAMETERS="--enrollment-token $ENROLLMENT_TOKEN" +else + POSITIONAL_PARAMETERS="" +fi + +# Signal forwarding and child reaping is handled by `tini`, which is the +# actual entrypoint of the container +exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD" \ No newline at end of file diff --git a/Bigdata/elasticsearch/meta.yml b/Bigdata/elasticsearch/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..86503555591d3ac05e557db81d0d0f4aaf867be8 --- /dev/null +++ b/Bigdata/elasticsearch/meta.yml @@ -0,0 +1,2 @@ +8.17.3-oe2403sp1: + path: 8.17.3/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Bigdata/image-list.yml b/Bigdata/image-list.yml index 99ea996b1baa1c8816d90b234e0fbec442ac4ef0..d325d127dbd027c7ef44477c6df301fb0739dc20 100644 --- a/Bigdata/image-list.yml +++ b/Bigdata/image-list.yml @@ -2,4 +2,9 @@ images: flink: flink impala: impala kafka: kafka - spark: spark \ No newline at end of file + spark: spark + consul: consul + doris: doris + elasticsearch: elasticsearch + kibana: kibana + logstash: logstash \ No newline at end of file diff --git a/Bigdata/kibana/8.17.3/24.03-lts-sp1/Dockerfile b/Bigdata/kibana/8.17.3/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..9820d4f1b8e6da2230a4176e32449d09ef345d8d --- /dev/null +++ b/Bigdata/kibana/8.17.3/24.03-lts-sp1/Dockerfile @@ -0,0 +1,60 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} AS builder + +ARG TARGETARCH +ARG BUILDARCH +ARG VERSION=8.17.3 +ARG TINI_VERSION=0.19.0 + +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + BUILDARCH="x86_64"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + BUILDARCH="aarch64"; \ + fi; \ + curl --retry 10 -s -L --output /tmp/kibana.tar.gz https://artifacts.elastic.co/downloads/kibana/kibana-${VERSION}-linux-${BUILDARCH}.tar.gz +RUN mkdir /usr/share/kibana + +WORKDIR /usr/share/kibana +RUN tar -zxf /tmp/kibana.tar.gz --strip-components=1 +RUN chmod -R g=u /usr/share/kibana +RUN curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-${TARGETARCH} ; \ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-${TARGETARCH}.sha256sum ; \ + sha256sum -c tini-${TARGETARCH}.sha256sum ; \ + rm tini-${TARGETARCH}.sha256sum ; \ + mv tini-${TARGETARCH} /bin/tini ; \ + chmod 0555 /bin/tini +RUN mkdir -p /usr/share/fonts/local && \ + curl --retry 10 -S -L -o /usr/share/fonts/local/NotoSansCJK-Regular.ttc https://github.com/googlefonts/noto-cjk/raw/NotoSansV2.001/NotoSansCJK-Regular.ttc && \ + echo "5dcd1c336cc9344cb77c03a0cd8982ca8a7dc97d620fd6c9c434e02dcb1ceeb3 /usr/share/fonts/local/NotoSansCJK-Regular.ttc" | sha256sum -c - + + +FROM ${BASE} +EXPOSE 5601 + +RUN yum install -y shadow findutils fontconfig liberation-fonts nss ca-certificates + +COPY --from=builder --chown=1000:0 /usr/share/kibana /usr/share/kibana +COPY --from=builder --chown=0:0 /bin/tini /bin/tini +COPY --from=builder --chown=0:0 /usr/share/fonts/local/NotoSansCJK-Regular.ttc /usr/share/fonts/local/NotoSansCJK-Regular.ttc +RUN fc-cache -v + +WORKDIR /usr/share/kibana +RUN ln -s /usr/share/kibana /opt/kibana + +ENV ELASTIC_CONTAINER true +ENV PATH=/usr/share/kibana/bin:$PATH + +COPY --chown=1000:0 config/kibana.yml /usr/share/kibana/config/kibana.yml +COPY kibana-docker /usr/local/bin/ +RUN chmod g+ws /usr/share/kibana && \ + 
chmod 0555 /usr/local/bin/kibana-docker +RUN find /usr/share/kibana -gid 0 -and -not -perm /g+w -exec chmod g+w {} \; && \ + find / -xdev -perm -4000 -exec chmod u-s {} + && \ + groupadd -g 1000 kibana && \ + useradd -u 1000 -g 1000 -G 0 -d /usr/share/kibana -M kibana && \ + yum remove -y shadow findutils && yum clean all + + +ENTRYPOINT ["/bin/tini", "--"] +CMD ["/usr/local/bin/kibana-docker"] +USER 1000 \ No newline at end of file diff --git a/Bigdata/kibana/8.17.3/24.03-lts-sp1/config/kibana.yml b/Bigdata/kibana/8.17.3/24.03-lts-sp1/config/kibana.yml new file mode 100644 index 0000000000000000000000000000000000000000..230ba1c26df823d082431e1938ea065e9297e4c8 --- /dev/null +++ b/Bigdata/kibana/8.17.3/24.03-lts-sp1/config/kibana.yml @@ -0,0 +1,9 @@ +# +# ** THIS IS AN AUTO-GENERATED FILE ** +# + +# Default Kibana configuration for docker target +server.host: "0.0.0.0" +server.shutdownTimeout: "5s" +elasticsearch.hosts: [ "http://elasticsearch:9200" ] +monitoring.ui.container.elasticsearch.enabled: true \ No newline at end of file diff --git a/Bigdata/kibana/8.17.3/24.03-lts-sp1/kibana-docker b/Bigdata/kibana/8.17.3/24.03-lts-sp1/kibana-docker new file mode 100644 index 0000000000000000000000000000000000000000..335e08acfab5e67bab267d53f43be77078ca0b0d --- /dev/null +++ b/Bigdata/kibana/8.17.3/24.03-lts-sp1/kibana-docker @@ -0,0 +1,472 @@ +#!/bin/bash +# +# ** THIS IS AN AUTO-GENERATED FILE ** +# + +# Run Kibana, using environment variables to set longopts defining Kibana's +# configuration. +# +# eg. Setting the environment variable: +# +# ELASTICSEARCH_LOGQUERIES=true +# +# will cause Kibana to be invoked with: +# +# --elasticsearch.logQueries=true + +kibana_vars=( + apm_oss.apmAgentConfigurationIndex + apm_oss.errorIndices + apm_oss.indexPattern + apm_oss.metricsIndices + apm_oss.onboardingIndices + apm_oss.sourcemapIndices + apm_oss.spanIndices + apm_oss.transactionIndices + console.proxyConfig + console.proxyFilter + csp.strict + csp.warnLegacyBrowsers + csp.disableUnsafeEval + csp.script_src + csp.worker_src + csp.style_src + csp.connect_src + csp.default_src + csp.font_src + csp.frame_src + csp.img_src + csp.frame_ancestors + csp.report_uri + csp.report_to + csp.report_only.form_action + permissionsPolicy.report_to + data.autocomplete.valueSuggestions.terminateAfter + data.autocomplete.valueSuggestions.timeout + data.search.asyncSearch.waitForCompletion + data.search.asyncSearch.keepAlive + data.search.asyncSearch.batchedReduceSize + data.search.asyncSearch.pollInterval + data.search.sessions.defaultExpiration + data.search.sessions.enabled + data.search.sessions.maxUpdateRetries + data.search.sessions.notTouchedInProgressTimeout + data.search.sessions.notTouchedTimeout + data.search.sessions.pageSize + data.search.sessions.trackingInterval + unifiedSearch.autocomplete.valueSuggestions.terminateAfter + unifiedSearch.autocomplete.valueSuggestions.timeout + unifiedSearch.autocomplete.querySuggestions.enabled + unifiedSearch.autocomplete.valueSuggestions.enabled + unifiedSearch.autocomplete.valueSuggestions.tiers + elasticsearch.customHeaders + elasticsearch.hosts + elasticsearch.logQueries + elasticsearch.password + elasticsearch.pingTimeout + elasticsearch.publicBaseUrl + elasticsearch.requestHeadersWhitelist + elasticsearch.requestTimeout + elasticsearch.serviceAccountToken + elasticsearch.shardTimeout + elasticsearch.sniffInterval + elasticsearch.sniffOnConnectionFault + elasticsearch.sniffOnStart + elasticsearch.ssl.alwaysPresentCertificate + elasticsearch.ssl.certificate + 
elasticsearch.ssl.certificateAuthorities + elasticsearch.ssl.key + elasticsearch.ssl.keyPassphrase + elasticsearch.ssl.keystore.password + elasticsearch.ssl.keystore.path + elasticsearch.ssl.truststore.password + elasticsearch.ssl.truststore.path + elasticsearch.ssl.verificationMode + elasticsearch.username + enterpriseSearch.accessCheckTimeout + enterpriseSearch.accessCheckTimeoutWarning + enterpriseSearch.host + externalUrl.policy + i18n.locale + interactiveSetup.enabled + interactiveSetup.connectionCheck.interval + kibana.autocompleteTerminateAfter + kibana.autocompleteTimeout + kibana.index + logging.appenders + logging.appenders.console + logging.appenders.file + logging.loggers + logging.loggers.appenders + logging.loggers.level + logging.loggers.name + logging.root + logging.root.appenders + logging.root.level + map.emsUrl + map.includeElasticMapsService + map.tilemap.options.attribution + map.tilemap.options.maxZoom + map.tilemap.options.minZoom + map.tilemap.options.subdomains + map.tilemap.url + migrations.batchSize + migrations.maxBatchSizeBytes + migrations.pollInterval + migrations.retryAttempts + migrations.scrollDuration + migrations.skip + monitoring.cluster_alerts.email_notifications.email_address + monitoring.kibana.collection.enabled + monitoring.kibana.collection.interval + monitoring.ui.ccs.enabled + monitoring.ui.container.elasticsearch.enabled + monitoring.ui.container.logstash.enabled + monitoring.ui.elasticsearch.hosts + monitoring.ui.elasticsearch.logFetchCount + monitoring.ui.elasticsearch.password + monitoring.ui.elasticsearch.pingTimeout + monitoring.ui.elasticsearch.ssl.certificateAuthorities + monitoring.ui.elasticsearch.ssl.verificationMode + monitoring.ui.elasticsearch.username + monitoring.ui.enabled + monitoring.ui.logs.index + monitoring.ui.max_bucket_size + monitoring.ui.min_interval_seconds + newsfeed.enabled + node.roles + ops.cGroupOverrides.cpuAcctPath + ops.cGroupOverrides.cpuPath + ops.interval + path.data + pid.file + profiler.signal + regionmap + savedObjects.maxImportExportSize + savedObjects.maxImportPayloadBytes + savedObjects.allowHttpApiAccess + security.showInsecureClusterWarning + server.basePath + server.cdn.url + server.compression.enabled + server.compression.referrerWhitelist + server.cors + server.cors.allowCredentials + server.cors.allowOrigin + server.cors.enabled + server.cors.origin + server.customResponseHeaders + server.defaultRoute + server.host + server.http2.allowUnsecure + server.keepAliveTimeout + server.maxPayload + server.maxPayloadBytes + server.name + server.port + server.protocol + server.publicBaseUrl + server.requestId.allowFromAnyIp + server.requestId.ipAllowlist + server.rewriteBasePath + server.restrictInternalApis + server.securityResponseHeaders.disableEmbedding + server.securityResponseHeaders.permissionsPolicy + server.securityResponseHeaders.referrerPolicy + server.securityResponseHeaders.strictTransportSecurity + server.securityResponseHeaders.xContentTypeOptions + server.securityResponseHeaders.crossOriginOpenerPolicy + server.shutdownTimeout + server.socketTimeout + server.ssl.cert + server.ssl.certificate + server.ssl.certificateAuthorities + server.ssl.cipherSuites + server.ssl.clientAuthentication + server.ssl.enabled + server.ssl.key + server.ssl.keyPassphrase + server.ssl.keystore.password + server.ssl.keystore.path + server.ssl.redirectHttpFromPort + server.ssl.supportedProtocols + server.ssl.truststore.password + server.ssl.truststore.path + server.uuid + server.xsrf.allowlist + 
server.xsrf.disableProtection + status.allowAnonymous + status.v6ApiFormat + telemetry.allowChangingOptInStatus + telemetry.enabled + telemetry.hidePrivacyStatement + telemetry.optIn + telemetry.sendUsageTo + telemetry.sendUsageFrom + tilemap.options.attribution + tilemap.options.maxZoom + tilemap.options.minZoom + tilemap.options.subdomains + tilemap.url + vega.enableExternalUrls + vis_type_vega.enableExternalUrls + xpack.actions.allowedHosts + xpack.actions.customHostSettings + xpack.actions.email.domain_allowlist + xpack.actions.enableFooterInEmail + xpack.actions.enabledActionTypes + xpack.actions.maxResponseContentLength + xpack.actions.preconfigured + xpack.actions.preconfiguredAlertHistoryEsIndex + xpack.actions.proxyBypassHosts + xpack.actions.proxyHeaders + xpack.actions.proxyOnlyHosts + xpack.actions.proxyRejectUnauthorizedCertificates + xpack.actions.proxyUrl + xpack.actions.rejectUnauthorized + xpack.actions.responseTimeout + xpack.actions.ssl.proxyVerificationMode + xpack.actions.ssl.verificationMode + xpack.alerting.healthCheck.interval + xpack.alerting.invalidateApiKeysTask.interval + xpack.alerting.invalidateApiKeysTask.removalDelay + xpack.alerting.defaultRuleTaskTimeout + xpack.alerting.rules.run.timeout + xpack.alerting.rules.run.ruleTypeOverrides + xpack.alerting.cancelAlertsOnRuleTimeout + xpack.alerting.rules.minimumScheduleInterval.value + xpack.alerting.rules.minimumScheduleInterval.enforce + xpack.alerting.rules.run.actions.max + xpack.alerting.rules.run.alerts.max + xpack.alerting.rules.run.actions.connectorTypeOverrides + xpack.alerting.maxScheduledPerMinute + xpack.alerts.healthCheck.interval + xpack.alerts.invalidateApiKeysTask.interval + xpack.alerts.invalidateApiKeysTask.removalDelay + xpack.apm.indices.error + xpack.apm.indices.metric + xpack.apm.indices.onboarding + xpack.apm.indices.sourcemap + xpack.apm.indices.span + xpack.apm.indices.transaction + xpack.apm.maxServiceEnvironments + xpack.apm.searchAggregatedTransactions + xpack.apm.serviceMapEnabled + xpack.apm.serviceMapFingerprintBucketSize + xpack.apm.serviceMapFingerprintGlobalBucketSize + xpack.apm.ui.enabled + xpack.apm.ui.maxTraceItems + xpack.apm.ui.transactionGroupBucketSize + xpack.banners.backgroundColor + xpack.banners.disableSpaceBanners + xpack.banners.placement + xpack.banners.textColor + xpack.banners.textContent + xpack.cases.files.allowedMimeTypes + xpack.cases.files.maxSize + xpack.code.disk.thresholdEnabled + xpack.code.disk.watermarkLow + xpack.code.indexRepoFrequencyMs + xpack.code.lsp.verbose + xpack.code.maxWorkspace + xpack.code.security.enableGitCertCheck + xpack.code.security.gitHostWhitelist + xpack.code.security.gitProtocolWhitelist + xpack.code.ui.enabled + xpack.code.updateRepoFrequencyMs + xpack.code.verbose + xpack.data_enhanced.search.sessions.defaultExpiration + xpack.data_enhanced.search.sessions.enabled + xpack.data_enhanced.search.sessions.maxUpdateRetries + xpack.data_enhanced.search.sessions.notTouchedInProgressTimeout + xpack.data_enhanced.search.sessions.notTouchedTimeout + xpack.data_enhanced.search.sessions.pageSize + xpack.data_enhanced.search.sessions.trackingInterval + xpack.discoverEnhanced.actions.exploreDataInChart.enabled + xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled + xpack.encryptedSavedObjects.encryptionKey + xpack.encryptedSavedObjects.keyRotation.decryptionOnlyKeys + xpack.event_log.indexEntries + xpack.event_log.logEntries + xpack.fleet.agentPolicies + xpack.fleet.agents.elasticsearch.host + 
xpack.fleet.agents.elasticsearch.hosts + xpack.fleet.agents.enabled + xpack.fleet.agents.fleet_server.hosts + xpack.fleet.agents.kibana.host + xpack.fleet.agents.tlsCheckDisabled + xpack.fleet.packages + xpack.fleet.packageVerification.gpgKeyPath + xpack.fleet.registryProxyUrl + xpack.fleet.registryUrl + xpack.graph.canEditDrillDownUrls + xpack.graph.savePolicy + xpack.infra.query.partitionFactor + xpack.infra.query.partitionSize + xpack.infra.sources.default.fields.container + xpack.infra.sources.default.fields.host + xpack.infra.sources.default.fields.message + xpack.infra.sources.default.fields.pod + xpack.infra.sources.default.fields.tiebreaker + xpack.infra.sources.default.fields.timestamp + xpack.infra.sources.default.logAlias + xpack.infra.sources.default.metricAlias + xpack.ingestManager.fleet.tlsCheckDisabled + xpack.ingestManager.registryUrl + xpack.observability.annotations.index + xpack.observability.unsafe.alertDetails.metrics.enabled + xpack.observability.unsafe.alertDetails.logs.enabled + xpack.observability.unsafe.alertDetails.uptime.enabled + xpack.observability.unsafe.alertDetails.observability.enabled + xpack.observability.unsafe.thresholdRule.enabled + xpack.productDocBase.artifactRepositoryUrl + xpack.reporting.capture.browser.autoDownload + xpack.reporting.capture.browser.chromium.disableSandbox + xpack.reporting.capture.browser.chromium.inspect + xpack.reporting.capture.browser.chromium.maxScreenshotDimension + xpack.reporting.capture.browser.chromium.proxy.bypass + xpack.reporting.capture.browser.chromium.proxy.enabled + xpack.reporting.capture.browser.chromium.proxy.server + xpack.reporting.capture.browser.type + xpack.reporting.capture.concurrency + xpack.reporting.capture.loadDelay + xpack.reporting.capture.maxAttempts + xpack.reporting.capture.networkPolicy + xpack.reporting.capture.settleTime + xpack.reporting.capture.timeout + xpack.reporting.capture.timeouts.openUrl + xpack.reporting.capture.timeouts.openUrl + xpack.reporting.capture.timeouts.renderComplete + xpack.reporting.capture.timeouts.waitForElements + xpack.reporting.capture.viewport.height + xpack.reporting.capture.viewport.width + xpack.reporting.capture.zoom + xpack.reporting.csv.checkForFormulas + xpack.reporting.csv.enablePanelActionDownload + xpack.reporting.csv.escapeFormulaValues + xpack.reporting.csv.maxSizeBytes + xpack.reporting.csv.scroll.duration + xpack.reporting.csv.scroll.size + xpack.reporting.csv.scroll.strategy + xpack.reporting.csv.useByteOrderMarkEncoding + xpack.reporting.enabled + xpack.reporting.encryptionKey + xpack.reporting.kibanaApp + xpack.reporting.kibanaServer.hostname + xpack.reporting.kibanaServer.port + xpack.reporting.kibanaServer.protocol + xpack.reporting.poll.jobCompletionNotifier.interval + xpack.reporting.poll.jobCompletionNotifier.intervalErrorMultiplier + xpack.reporting.poll.jobsRefresh.interval + xpack.reporting.poll.jobsRefresh.intervalErrorMultiplier + xpack.reporting.queue.indexInterval + xpack.reporting.queue.pollEnabled + xpack.reporting.queue.pollInterval + xpack.reporting.queue.pollIntervalErrorMultiplier + xpack.reporting.queue.timeout + xpack.reporting.roles.allow + xpack.reporting.roles.enabled + xpack.ruleRegistry.write.enabled + xpack.screenshotting.browser.chromium.disableSandbox + xpack.security.accessAgreement.message + xpack.security.audit.appender.fileName + xpack.security.audit.appender.layout.highlight + xpack.security.audit.appender.layout.pattern + xpack.security.audit.appender.layout.type + 
xpack.security.audit.appender.legacyLoggingConfig + xpack.security.audit.appender.policy.interval + xpack.security.audit.appender.policy.modulate + xpack.security.audit.appender.policy.size + xpack.security.audit.appender.policy.type + xpack.security.audit.appender.strategy.max + xpack.security.audit.appender.strategy.pattern + xpack.security.audit.appender.strategy.type + xpack.security.audit.appender.type + xpack.security.audit.enabled + xpack.security.audit.ignore_filters + xpack.security.authc.http.autoSchemesEnabled + xpack.security.authc.http.enabled + xpack.security.authc.http.schemes + xpack.security.authc.oidc.realm + xpack.security.authc.providers + xpack.security.authc.saml.maxRedirectURLSize + xpack.security.authc.saml.realm + xpack.security.authc.selector.enabled + xpack.security.cookieName + xpack.security.encryptionKey + xpack.security.fipsMode.enabled + xpack.security.loginAssistanceMessage + xpack.security.loginHelp + xpack.security.sameSiteCookies + xpack.security.secureCookies + xpack.security.session.cleanupInterval + xpack.security.session.concurrentSessions.maxSessions + xpack.security.session.idleTimeout + xpack.security.session.lifespan + xpack.security.sessionTimeout + xpack.security.showInsecureClusterWarning + xpack.securitySolution.alertMergeStrategy + xpack.securitySolution.alertIgnoreFields + xpack.securitySolution.maxExceptionsImportSize + xpack.securitySolution.maxRuleImportExportSize + xpack.securitySolution.maxRuleImportPayloadBytes + xpack.securitySolution.maxTimelineImportExportSize + xpack.securitySolution.maxTimelineImportPayloadBytes + xpack.securitySolution.packagerTaskInterval + xpack.securitySolution.prebuiltRulesPackageVersion + xpack.spaces.maxSpaces + xpack.task_manager.capacity + xpack.task_manager.claim_strategy + xpack.task_manager.auto_calculate_default_ech_capacity + xpack.task_manager.discovery.active_nodes_lookback + xpack.task_manager.discovery.interval + xpack.task_manager.kibanas_per_partition + xpack.task_manager.max_attempts + xpack.task_manager.max_workers + xpack.task_manager.monitored_aggregated_stats_refresh_rate + xpack.task_manager.monitored_stats_required_freshness + xpack.task_manager.monitored_stats_running_average_window + xpack.task_manager.monitored_stats_health_verbose_log.enabled + xpack.task_manager.monitored_stats_health_verbose_log.warn_delayed_task_start_in_seconds + xpack.task_manager.monitored_task_execution_thresholds + xpack.task_manager.poll_interval + xpack.task_manager.request_capacity + xpack.task_manager.version_conflict_threshold + xpack.task_manager.event_loop_delay.monitor + xpack.task_manager.event_loop_delay.warn_threshold + xpack.task_manager.worker_utilization_running_average_window + xpack.uptime.index + serverless +) + +longopts='' +for kibana_var in ${kibana_vars[*]}; do + # 'elasticsearch.hosts' -> 'ELASTICSEARCH_HOSTS' + env_var=$(echo ${kibana_var^^} | tr . _) + + # Indirectly lookup env var values via the name of the var. + # REF: http://tldp.org/LDP/abs/html/bashver2.html#EX78 + value=${!env_var} + if [[ -n $value ]]; then + longopt="--${kibana_var}=${value}" + longopts+=" ${longopt}" + fi +done + +# Files created at run-time should be group-writable, for Openshift's sake. +umask 0002 + +# The virtual file /proc/self/cgroup should list the current cgroup +# membership. For each hierarchy, you can follow the cgroup path from +# this file to the cgroup filesystem (usually /sys/fs/cgroup/) and +# introspect the statistics for the cgroup for the given +# hierarchy. 
Alas, Docker breaks this by mounting the container +# statistics at the root while leaving the cgroup paths as the actual +# paths. Therefore, Kibana provides a mechanism to override +# reading the cgroup path from /proc/self/cgroup and instead uses the +# cgroup path defined the configuration properties +# ops.cGroupOverrides.cpuPath and ops.cGroupOverrides.cpuAcctPath. +# Therefore, we set this value here so that cgroup statistics are +# available for the container this process will run in. + +exec /usr/share/kibana/bin/kibana --ops.cGroupOverrides.cpuPath=/ --ops.cGroupOverrides.cpuAcctPath=/ ${longopts} "$@" \ No newline at end of file diff --git a/Bigdata/kibana/meta.yml b/Bigdata/kibana/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..86503555591d3ac05e557db81d0d0f4aaf867be8 --- /dev/null +++ b/Bigdata/kibana/meta.yml @@ -0,0 +1,2 @@ +8.17.3-oe2403sp1: + path: 8.17.3/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/Dockerfile b/Bigdata/logstash/8.17.3/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..8befbed720e0c4482b5be0afdae0a8e8f6c7f276 --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/Dockerfile @@ -0,0 +1,47 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG BUILDARCH +ARG VERSION=8.17.3 + +RUN yum install -y shadow procps findutils tar gzip glibc-langpack-en +RUN groupadd -g 1000 logstash && \ + useradd -u 1000 -g 1000 -d /usr/share/logstash -M logstash + +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + BUILDARCH="x86_64"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + BUILDARCH="aarch64"; \ + fi; \ + curl --retry 10 -s -L --output /tmp/logstash.tar.gz https://artifacts.elastic.co/downloads/logstash/logstash-${VERSION}-linux-${BUILDARCH}.tar.gz + +WORKDIR /usr/share/logstash +RUN tar -zxf /tmp/logstash.tar.gz -C /usr/share/logstash --strip-components=1 && \ + chown --recursive logstash:logstash /usr/share/logstash/ && \ + chown -R logstash:root /usr/share/logstash && \ + chmod -R g=u /usr/share/logstash && \ + mkdir /licenses/ && \ + mv /usr/share/logstash/NOTICE.TXT /licenses/NOTICE.TXT && \ + mv /usr/share/logstash/LICENSE.txt /licenses/LICENSE.txt && \ + find /usr/share/logstash -type d -exec chmod g+s {} \; && \ + ln -s /usr/share/logstash /opt/logstash + + +ENV ELASTIC_CONTAINER true +ENV PATH=/usr/share/logstash/bin:$PATH +ENV LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 + +COPY entrypoint.sh /usr/local/bin/ +COPY config/logstash-full.yml config/logstash.yml +COPY config/pipelines.yml config/log4j2.properties config/log4j2.file.properties config/ +COPY config/default.conf pipeline/logstash.conf +COPY env2yaml/env2yaml-${TARGETARCH} /usr/local/bin/env2yaml + +RUN chown --recursive logstash:root config/ pipeline/ +RUN chmod 0755 /usr/local/bin/entrypoint.sh && \ + chmod 0755 /usr/local/bin/env2yaml + +USER 1000 +EXPOSE 9600 5044 +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/default.conf b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/default.conf new file mode 100644 index
0000000000000000000000000000000000000000..40785ed298617f3d0d259a6ab78e2da3bb96b5fb --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/default.conf @@ -0,0 +1,11 @@ +input { + beats { + port => 5044 + } +} + +output { + stdout { + codec => rubydebug + } +} diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/log4j2.file.properties b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/log4j2.file.properties new file mode 100644 index 0000000000000000000000000000000000000000..fc94a7e69e7f48939eb5c234ef0fe666f250570c --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/log4j2.file.properties @@ -0,0 +1,147 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +appender.rolling.type = RollingFile +appender.rolling.name = plain_rolling +appender.rolling.fileName = ${sys:ls.logs}/logstash-plain.log +appender.rolling.filePattern = ${sys:ls.logs}/logstash-plain-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 100MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.max = 30 +appender.rolling.avoid_pipelined_filter.type = PipelineRoutingFilter + +appender.json_rolling.type = RollingFile +appender.json_rolling.name = json_rolling +appender.json_rolling.fileName = ${sys:ls.logs}/logstash-json.log +appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-json-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling.policies.type = Policies +appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling.policies.time.interval = 1 +appender.json_rolling.policies.time.modulate = true +appender.json_rolling.layout.type = JSONLayout +appender.json_rolling.layout.compact = true +appender.json_rolling.layout.eventEol = true +appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling.policies.size.size = 100MB +appender.json_rolling.strategy.type = DefaultRolloverStrategy +appender.json_rolling.strategy.max = 30 +appender.json_rolling.avoid_pipelined_filter.type = PipelineRoutingFilter + +appender.routing.type = PipelineRouting +appender.routing.name = pipeline_routing_appender +appender.routing.pipeline.type = RollingFile +appender.routing.pipeline.name = appender-${ctx:pipeline.id} +appender.routing.pipeline.fileName = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.log +appender.routing.pipeline.filePattern = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.%i.log.gz +appender.routing.pipeline.layout.type = PatternLayout +appender.routing.pipeline.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n +appender.routing.pipeline.policy.type = SizeBasedTriggeringPolicy +appender.routing.pipeline.policy.size = 100MB 
+appender.routing.pipeline.strategy.type = DefaultRolloverStrategy +appender.routing.pipeline.strategy.max = 30 + +rootLogger.level = ${sys:ls.log.level} +rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console +rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling +rootLogger.appenderRef.routing.ref = pipeline_routing_appender + +# Slowlog + +appender.console_slowlog.type = Console +appender.console_slowlog.name = plain_console_slowlog +appender.console_slowlog.layout.type = PatternLayout +appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.json_console_slowlog.type = Console +appender.json_console_slowlog.name = json_console_slowlog +appender.json_console_slowlog.layout.type = JSONLayout +appender.json_console_slowlog.layout.compact = true +appender.json_console_slowlog.layout.eventEol = true + +appender.rolling_slowlog.type = RollingFile +appender.rolling_slowlog.name = plain_rolling_slowlog +appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-plain.log +appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-plain-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling_slowlog.policies.type = Policies +appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_slowlog.policies.time.interval = 1 +appender.rolling_slowlog.policies.time.modulate = true +appender.rolling_slowlog.layout.type = PatternLayout +appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n +appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_slowlog.policies.size.size = 100MB +appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy +appender.rolling_slowlog.strategy.max = 30 + +appender.json_rolling_slowlog.type = RollingFile +appender.json_rolling_slowlog.name = json_rolling_slowlog +appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-json.log +appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-json-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling_slowlog.policies.type = Policies +appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling_slowlog.policies.time.interval = 1 +appender.json_rolling_slowlog.policies.time.modulate = true +appender.json_rolling_slowlog.layout.type = JSONLayout +appender.json_rolling_slowlog.layout.compact = true +appender.json_rolling_slowlog.layout.eventEol = true +appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling_slowlog.policies.size.size = 100MB +appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy +appender.json_rolling_slowlog.strategy.max = 30 + +logger.slowlog.name = slowlog +logger.slowlog.level = trace +logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog +logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog +logger.slowlog.additivity = false + +logger.licensereader.name = logstash.licensechecker.licensereader +logger.licensereader.level = error + +# Silence http-client by default +logger.apache_http_client.name = org.apache.http +logger.apache_http_client.level = fatal + +# Deprecation log +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_plain_rolling +appender.deprecation_rolling.fileName = ${sys:ls.logs}/logstash-deprecation.log +appender.deprecation_rolling.filePattern = ${sys:ls.logs}/logstash-deprecation-%d{yyyy-MM-dd}-%i.log.gz 
+appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.deprecation_rolling.policies.time.interval = 1 +appender.deprecation_rolling.policies.time.modulate = true +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 100MB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 30 + +logger.deprecation.name = org.logstash.deprecation, deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling +logger.deprecation.additivity = false + +logger.deprecation_root.name = deprecation +logger.deprecation_root.level = WARN +logger.deprecation_root.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling +logger.deprecation_root.additivity = false \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/log4j2.properties b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/log4j2.properties new file mode 100644 index 0000000000000000000000000000000000000000..f70d736933ebc6d92eef0dc5edf50e556b4ee041 --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/log4j2.properties @@ -0,0 +1,16 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +rootLogger.level = ${sys:ls.log.level} +rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/logstash-full.yml b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/logstash-full.yml new file mode 100644 index 0000000000000000000000000000000000000000..e090a093f7fa909a6e81fd12405c36a87a530158 --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/logstash-full.yml @@ -0,0 +1,2 @@ +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/pipelines.yml b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/pipelines.yml new file mode 100644 index 0000000000000000000000000000000000000000..4fae603b2bbb89cdc7bb4cf0391808d76c9717f4 --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/config/pipelines.yml @@ -0,0 +1,6 @@ +# This file is where you define your pipelines. You can define multiple. 
+# For more information on multiple pipelines, see the documentation: +# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html + +- pipeline.id: main + path.config: "/usr/share/logstash/pipeline" \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/entrypoint.sh b/Bigdata/logstash/8.17.3/24.03-lts-sp1/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..3831edd0087cdf6244a645a2a4c3ec5a6971b556 --- /dev/null +++ b/Bigdata/logstash/8.17.3/24.03-lts-sp1/entrypoint.sh @@ -0,0 +1,31 @@ +#!/bin/bash -e + +# Map environment variables to entries in logstash.yml. +# Note that this will mutate logstash.yml in place if any such settings are found. +# This may be undesirable, especially if logstash.yml is bind-mounted from the +# host system. +env2yaml /usr/share/logstash/config/logstash.yml + +if [[ -n "$LOG_STYLE" ]]; then + case "$LOG_STYLE" in + console) + # This is the default. Nothing to do. + ;; + file) + # Overwrite the default config with the stack config. Do this as a + # copy, not a move, in case the container is restarted. + cp -f /usr/share/logstash/config/log4j2.file.properties /usr/share/logstash/config/log4j2.properties + ;; + *) + echo "ERROR: LOG_STYLE set to [$LOG_STYLE]. Expected [console] or [file]" >&2 + exit 1 ;; + esac +fi + +export LS_JAVA_OPTS="-Dls.cgroup.cpuacct.path.override=/ -Dls.cgroup.cpu.path.override=/ $LS_JAVA_OPTS" + +if [[ -z $1 ]] || [[ ${1:0:1} == '-' ]] ; then + exec logstash "$@" +else + exec "$@" +fi \ No newline at end of file diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/.DS_Store b/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..de74bcedf2e40644cdb21877f1bc0b85075dccc0 Binary files /dev/null and b/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/.DS_Store differ diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/env2yaml-amd64 b/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/env2yaml-amd64 new file mode 100644 index 0000000000000000000000000000000000000000..88c60bd4d5ae0dc5484f5fd7b80255e1ff077b5c Binary files /dev/null and b/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/env2yaml-amd64 differ diff --git a/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/env2yaml-arm64 b/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/env2yaml-arm64 new file mode 100644 index 0000000000000000000000000000000000000000..b29efd502ac8ffd00623a6b1de591fa57e81caf8 Binary files /dev/null and b/Bigdata/logstash/8.17.3/24.03-lts-sp1/env2yaml/env2yaml-arm64 differ diff --git a/Bigdata/logstash/meta.yml b/Bigdata/logstash/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..86503555591d3ac05e557db81d0d0f4aaf867be8 --- /dev/null +++ b/Bigdata/logstash/meta.yml @@ -0,0 +1,2 @@ +8.17.3-oe2403sp1: + path: 8.17.3/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Cloud/calico/3.29.3/24.03-lts-sp1/Dockerfile b/Cloud/calico/3.29.3/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7a9e04a3c7965948560809cb9fbb8dcdf009bf50 --- /dev/null +++ b/Cloud/calico/3.29.3/24.03-lts-sp1/Dockerfile @@ -0,0 +1,12 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG VERSION=3.29.3 + +RUN yum install -y gcc gcc-c++ java-1.8.0-openjdk javacc +RUN curl -fSL -o calicoctl https://github.com/projectcalico/calico/releases/download/v${VERSION}/calicoctl-linux-${TARGETARCH} && \ + mv calicoctl /usr/bin/calicoctl && \ + chmod 755 
/usr/bin/calicoctl + +ENTRYPOINT ["/usr/bin/calicoctl"] \ No newline at end of file diff --git a/Cloud/calico/meta.yml b/Cloud/calico/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..676af6aa4fef1d0f30b7e9330df88ef7364fc0df --- /dev/null +++ b/Cloud/calico/meta.yml @@ -0,0 +1,2 @@ +3.29.3-oe2403sp1: + path: 3.29.3/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Cloud/flannel/0.26.5/24.03-lts-sp1/Dockerfile b/Cloud/flannel/0.26.5/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..16ab9a1ba6823a5c66078495c8e2c1f9b795b78a --- /dev/null +++ b/Cloud/flannel/0.26.5/24.03-lts-sp1/Dockerfile @@ -0,0 +1,25 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG VERSION=0.26.5 +ARG GO_VERSION=1.23.7 +ARG FLANNEL_URL=https://github.com/flannel-io/flannel/archive/refs/tags/v${VERSION}.tar.gz + +WORKDIR /app +RUN curl -fSL -o go.tar.gz https://golang.google.cn/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz && \ + tar -xvf go.tar.gz -C /usr/local +ENV PATH="/usr/local/go/bin:${PATH}" + +RUN yum install -y conntrack-tools ipset net-tools && \ + yum clean all +RUN curl -fSL ${FLANNEL_URL} -o flannel.tar.gz && \ + tar -xzf flannel.tar.gz && \ + cd flannel-* && \ + go build && \ + cp flannel /usr/bin/ && \ + cd .. && \ + rm -rf flannel-* flannel.tar.gz && \ + chmod a+x /usr/bin/flannel + +ENTRYPOINT ["/usr/bin/flannel"] \ No newline at end of file diff --git a/Cloud/flannel/meta.yml b/Cloud/flannel/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..2fa2f4b829b76ceb3781d75427a3296d44bb98a6 --- /dev/null +++ b/Cloud/flannel/meta.yml @@ -0,0 +1,2 @@ +0.26.5-oe2403sp1: + path: 0.26.5/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Cloud/image-list.yml b/Cloud/image-list.yml index c3c56bcaa969e62fd828b1b5a121f3c89ed6a03e..f38aa52c094f19eea2543d273e9f623c49f89679 100644 --- a/Cloud/image-list.yml +++ b/Cloud/image-list.yml @@ -18,4 +18,6 @@ images: telegraf: telegraf tempo: tempo traefik: traefik - zookeeper: zookeeper \ No newline at end of file + zookeeper: zookeeper + flannel: flannel + istio: istio \ No newline at end of file diff --git a/Cloud/istio/1.25.1/24.03-lts-sp1/Dockerfile b/Cloud/istio/1.25.1/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..b40f85b2caea54dabeddf9aef64bdfaa12b1db42 --- /dev/null +++ b/Cloud/istio/1.25.1/24.03-lts-sp1/Dockerfile @@ -0,0 +1,13 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG VERSION=1.25.1 + +RUN curl -fSL -o istio.tar.gz https://github.com/istio/istio/releases/download/${VERSION}/istio-${VERSION}-linux-${TARGETARCH}.tar.gz && \ + mkdir -p /usr/local/istio/ && \ + tar -zxf istio.tar.gz -C /usr/local/istio/ --strip-components=1 + +ENV PATH=$PATH:/usr/local/istio/bin/ + +CMD ["istioctl", "version"] diff --git a/Cloud/istio/meta.yml b/Cloud/istio/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..570fd66dfc7638939eaf9ab7310c9fb9c5c7dcfb --- /dev/null +++ b/Cloud/istio/meta.yml @@ -0,0 +1,2 @@ +1.25.1-oe2403sp1: + path: 1.25.1/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/Dockerfile b/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..2a344ecba15d07455c2744462b2cb2a0c7691fec --- /dev/null +++ b/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/Dockerfile @@ -0,0 +1,29 @@ 
+ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG BUILDARCH +ARG VERSION="25.3.1.2703" + + +WORKDIR /tmp +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + BUILDARCH="x86_64"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + BUILDARCH="aarch64"; \ + fi; \ + curl -fSL -o clickhouse-server.rpm https://github.com/ClickHouse/ClickHouse/releases/download/v${VERSION}-lts/clickhouse-server-${VERSION}.${BUILDARCH}.rpm && \ + curl -fSL -o clickhouse-common-static.rpm https://github.com/ClickHouse/ClickHouse/releases/download/v${VERSION}-lts/clickhouse-common-static-${VERSION}.${BUILDARCH}.rpm && \ + curl -fSL -o clickhouse-client.rpm https://github.com/ClickHouse/ClickHouse/releases/download/v${VERSION}-lts/clickhouse-client-${VERSION}.${BUILDARCH}.rpm + +RUN yum install -y sudo shadow clickhouse-common-static.rpm clickhouse-server.rpm clickhouse-client.rpm && \ + yum clean all + +COPY config.xml /etc/clickhouse-server/config.d/ +COPY entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh && \ + mkdir -p /docker-entrypoint-initdb.d + +EXPOSE 9000 8123 9009 + +ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file diff --git a/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/config.xml b/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/config.xml new file mode 100644 index 0000000000000000000000000000000000000000..1c26541780534efa06d34388db3b9a97770cc7d6 --- /dev/null +++ b/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/config.xml @@ -0,0 +1,12 @@ + + + :: + 0.0.0.0 + 1 + + + \ No newline at end of file diff --git a/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/entrypoint.sh b/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..582e734bc987e77763dec78e6470abed90536a1c --- /dev/null +++ b/Database/clickhouse/25.3.1.2703/24.03-lts-sp1/entrypoint.sh @@ -0,0 +1,280 @@ +#!/bin/bash + +set -eo pipefail +shopt -s nullglob + +DO_CHOWN=1 +if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then + DO_CHOWN=0 +fi + +# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated +# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as +# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3 +if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then + echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' 
>&2 + echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2 + echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2 +fi + +# support `docker run --user=xxx:xxxx` +if [[ "$(id -u)" = "0" ]]; then + if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then + USER=0 + GROUP=0 + else + USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" + GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" + fi +else + USER="$(id -u)" + GROUP="$(id -g)" + DO_CHOWN=0 +fi + +# set some vars +CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}" + +# get CH directories locations +DATA_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=path || true)" +TMP_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=tmp_path || true)" +USER_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=user_files_path || true)" +LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.log || true)" +LOG_DIR="" +if [ -n "$LOG_PATH" ]; then LOG_DIR="$(dirname "$LOG_PATH")"; fi +ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.errorlog || true)" +ERROR_LOG_DIR="" +if [ -n "$ERROR_LOG_PATH" ]; then ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH")"; fi +FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=format_schema_path || true)" + +# There could be many disks declared in config +readarray -t DISKS_PATHS < <(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='storage_configuration.disks.*.path' || true) +readarray -t DISKS_METADATA_PATHS < <(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='storage_configuration.disks.*.metadata_path' || true) + +CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}" +CLICKHOUSE_PASSWORD_FILE="${CLICKHOUSE_PASSWORD_FILE:-}" +if [[ -n "${CLICKHOUSE_PASSWORD_FILE}" && -f "${CLICKHOUSE_PASSWORD_FILE}" ]]; then + CLICKHOUSE_PASSWORD="$(cat "${CLICKHOUSE_PASSWORD_FILE}")" +fi +CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}" +CLICKHOUSE_DB="${CLICKHOUSE_DB:-}" +CLICKHOUSE_ACCESS_MANAGEMENT="${CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT:-0}" +CLICKHOUSE_SKIP_USER_SETUP="${CLICKHOUSE_SKIP_USER_SETUP:-0}" + +function create_directory_and_do_chown() { + local dir=$1 + # check if variable not empty + [ -z "$dir" ] && return + # ensure directories exist + if [ "$DO_CHOWN" = "1" ]; then + mkdir=( mkdir ) + else + # if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions + # it mainly happens on NFS mounts where root==nobody for security reasons + # thus mkdir MUST run with user id/gid and not from nobody that has zero permissions + mkdir=( clickhouse su "${USER}:${GROUP}" mkdir ) + fi + if ! 
"${mkdir[@]}" -p "$dir"; then + echo "Couldn't create necessary directory: $dir" + exit 1 + fi + + if [ "$DO_CHOWN" = "1" ]; then + # ensure proper directories permissions + # but skip it for if directory already has proper premissions, cause recursive chown may be slow + if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then + chown -R "$USER:$GROUP" "$dir" + fi + fi +} + +function manage_clickhouse_directories() { + for dir in "$ERROR_LOG_DIR" \ + "$LOG_DIR" \ + "$TMP_DIR" \ + "$USER_PATH" \ + "$FORMAT_SCHEMA_PATH" \ + "${DISKS_PATHS[@]}" \ + "${DISKS_METADATA_PATHS[@]}" + do + create_directory_and_do_chown "$dir" + done +} + +function manage_clickhouse_user() { + # Check if the `defaul` user is changed through any mounted file. It will mean that user took care of it already + # First, extract the users_xml.path and check it's relative or absolute + local USERS_XML USERS_CONFIG + USERS_XML=$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='user_directories.users_xml.path') + case $USERS_XML in + /* ) # absolute path + cp "$USERS_XML" /tmp + USERS_CONFIG="/tmp/$(basename $USERS_XML)" + ;; + * ) # relative path to the $CLICKHOUSE_CONFIG + cp "$(dirname "$CLICKHOUSE_CONFIG")/${USERS_XML}" /tmp + USERS_CONFIG="/tmp/$(basename $USERS_XML)" + ;; + esac + + # Compare original `users.default` to the processed one + local ORIGINAL_DEFAULT PROCESSED_DEFAULT CLICKHOUSE_DEFAULT_CHANGED + ORIGINAL_DEFAULT=$(clickhouse extract-from-config --config-file "$USERS_CONFIG" --key='users.default' | sha256sum) + PROCESSED_DEFAULT=$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --users --key='users.default' --try | sha256sum) + [ "$ORIGINAL_DEFAULT" == "$PROCESSED_DEFAULT" ] && CLICKHOUSE_DEFAULT_CHANGED=0 || CLICKHOUSE_DEFAULT_CHANGED=1 + + if [ "$CLICKHOUSE_SKIP_USER_SETUP" == "1" ]; then + echo "$0: explicitly skip changing user 'default'" + elif [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ] || [ "$CLICKHOUSE_ACCESS_MANAGEMENT" != "0" ]; then + # if clickhouse user is defined - create it (user "default" already exists out of box) + echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'" + cat < /etc/clickhouse-server/users.d/default-user.xml + + + + + + + + <${CLICKHOUSE_USER}> + default + + ::/0 + + /]]]]>}]]> + default + ${CLICKHOUSE_ACCESS_MANAGEMENT} + + + +EOT + elif [ "$CLICKHOUSE_DEFAULT_CHANGED" == "1" ]; then + # Leave users as is, do nothing + : + else + echo "$0: neither CLICKHOUSE_USER nor CLICKHOUSE_PASSWORD is set, disabling network access for user '$CLICKHOUSE_USER'" + cat < /etc/clickhouse-server/users.d/default-user.xml + + + + + + + ::1 + 127.0.0.1 + + + + +EOT + fi +} + +CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS="${CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS:-}" + +function init_clickhouse_db() { + # checking $DATA_DIR for initialization + if [ -d "${DATA_DIR%/}/data" ]; then + DATABASE_ALREADY_EXISTS='true' + fi + + # run initialization if flag CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS is not empty or data directory is empty + if [[ -n "${CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS}" || -z "${DATABASE_ALREADY_EXISTS}" ]]; then + RUN_INITDB_SCRIPTS='true' + fi + + if [ -n "${RUN_INITDB_SCRIPTS}" ]; then + if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then + # port is needed to check if clickhouse-server is ready for connections + HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port --try)" + 
HTTPS_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=https_port --try)" + + if [ -n "$HTTP_PORT" ]; then + URL="http://127.0.0.1:$HTTP_PORT/ping" + else + URL="https://127.0.0.1:$HTTPS_PORT/ping" + fi + + # Listen only on localhost until the initialization is done + clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 & + pid="$!" + + # check if clickhouse is ready to accept connections + # will try to send ping clickhouse via http_port (max 1000 retries by default, with 1 sec timeout and 1 sec delay between retries) + tries=${CLICKHOUSE_INIT_TIMEOUT:-1000} + while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do + if [ "$tries" -le "0" ]; then + echo >&2 'ClickHouse init process timeout.' + exit 1 + fi + tries=$(( tries-1 )) + sleep 1 + done + + clickhouseclient=( clickhouse-client --multiquery --host "127.0.0.1" -u "$CLICKHOUSE_USER" --password "$CLICKHOUSE_PASSWORD" ) + + echo + + # create default database, if defined + if [ -n "$CLICKHOUSE_DB" ]; then + echo "$0: create database '$CLICKHOUSE_DB'" + "${clickhouseclient[@]}" -q "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB"; + fi + + for f in /docker-entrypoint-initdb.d/*; do + case "$f" in + *.sh) + if [ -x "$f" ]; then + echo "$0: running $f" + "$f" + else + echo "$0: sourcing $f" + # shellcheck source=/dev/null + . "$f" + fi + ;; + *.sql) echo "$0: running $f"; "${clickhouseclient[@]}" < "$f" ; echo ;; + *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${clickhouseclient[@]}"; echo ;; + *) echo "$0: ignoring $f" ;; + esac + echo + done + + if ! kill -s TERM "$pid" || ! wait "$pid"; then + echo >&2 'Finishing of ClickHouse init process failed.' + exit 1 + fi + fi + else + echo "ClickHouse Database directory appears to contain a database; Skipping initialization" + fi +} + +# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + # Watchdog is launched by default, but does not send SIGINT to the main process, + # so the container can't be finished by ctrl+c + CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0} + export CLICKHOUSE_WATCHDOG_ENABLE + + create_directory_and_do_chown "$DATA_DIR" + + # Change working directory to $DATA_DIR in case there're paths relative to $DATA_DIR, also avoids running + # clickhouse-server at root directory. + cd "$DATA_DIR" + + # Using functions here to avoid unnecessary work in case of launching other binaries, + # inspired by postgres, mariadb etc. 
entrypoints + # It is necessary to pass the docker library consistency test + manage_clickhouse_directories + manage_clickhouse_user + init_clickhouse_db + + # This replaces the shell script with the server: + exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" +fi + +# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image +exec "$@" + +# vi: ts=4: sw=4: sts=4: expandtab \ No newline at end of file diff --git a/Database/clickhouse/meta.yml b/Database/clickhouse/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..aff6cba98af765a3dab885ff2ff6b69df898bcaf --- /dev/null +++ b/Database/clickhouse/meta.yml @@ -0,0 +1,2 @@ +25.3.1.2703-oe2403sp1: + path: 25.3.1.2703/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Database/image-list.yml b/Database/image-list.yml index 02c2bfc624fb1766319b4a36406b4b2529e12069..9592d958e0c94dbd3a6d161fd18b5db6c1d9f4be 100644 --- a/Database/image-list.yml +++ b/Database/image-list.yml @@ -4,4 +4,6 @@ images: mysql: mysql postgres: postgres redis: redis + clickhouse: clickhouse + influxdb: influxdb \ No newline at end of file diff --git a/Database/influxdb/2.7.11/24.03-lts-sp1/Dockerfile b/Database/influxdb/2.7.11/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..34d49eb3daf2a6af744e386583af9c3fb9fec935 --- /dev/null +++ b/Database/influxdb/2.7.11/24.03-lts-sp1/Dockerfile @@ -0,0 +1,20 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG VERSION=2.7.11 +ENV INFLUXDB_URL=https://dl.influxdata.com/influxdb/releases/influxdb2-${VERSION}_linux_${TARGETARCH}.tar.gz + +WORKDIR /influxdb +RUN curl -fSL -o influxdb.tar.gz ${INFLUXDB_URL} && \ + tar -zxf influxdb.tar.gz -C /influxdb --strip-components=1 && \ + rm influxdb.tar.gz + +RUN cp -r etc/* /etc/ && \ + cp usr/bin/* /usr/bin/ && \ + cp -r usr/lib/* /usr/lib/ && \ + cp -r usr/share/* /usr/share/ && \ + chmod 755 /usr/bin/influxd + +RUN rm -rf /influxdb +CMD ["/usr/bin/influxd"] \ No newline at end of file diff --git a/Database/influxdb/meta.yml b/Database/influxdb/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..f869941ac887adab72cf7a96bdcd106f4da9c750 --- /dev/null +++ b/Database/influxdb/meta.yml @@ -0,0 +1,2 @@ +2.7.11-oe2403sp1: + path: 2.7.11/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/HPC/bwa/meta.yml b/HPC/bwa/meta.yml index 30febc63c4ebb3ecefdb8fb1f4e8a28977b2bdb0..09e344cd46cdd02c06765a7a4b61673061904fe9 100644 --- a/HPC/bwa/meta.yml +++ b/HPC/bwa/meta.yml @@ -1,2 +1,2 @@ 0.7.18-oe2203sp3: - path: 0.7.18/22.03-lts-sp3/Dcokerfile \ No newline at end of file + path: 0.7.18/22.03-lts-sp3/Dockerfile \ No newline at end of file diff --git a/Others/glassfish/7.0.23/24.03-lts-sp1/Dockerfile b/Others/glassfish/7.0.23/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..0a4af9757a5591f4e3ded1e5c68964104aa45186 --- /dev/null +++ b/Others/glassfish/7.0.23/24.03-lts-sp1/Dockerfile @@ -0,0 +1,21 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG VERSION=7.0.23 + +RUN yum install -y java-17-openjdk-devel git && \ + yum clean all && \ + cd / && \ + curl -fSL -o apache-maven.tar.gz https://repo.huaweicloud.com/apache/maven/maven-3/3.9.3/binaries/apache-maven-3.9.3-bin.tar.gz && \ + mkdir -p /usr/local/maven && \ + tar -zxf apache-maven.tar.gz -C /usr/local/maven --strip-components=1 && \ + 
rm -rf apache-maven.tar.gz + +ENV MAVEN_HOME=/usr/local/maven +ENV PATH=${MAVEN_HOME}/bin:$PATH + +WORKDIR / +RUN git clone -b ${VERSION} https://github.com/eclipse-ee4j/glassfish.git && \ + cd glassfish && \ + mvn clean install -Pfastest -T4C \ No newline at end of file diff --git a/Others/glassfish/meta.yml b/Others/glassfish/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..8860fee09333f5aa3057674cf61c7242e9e63f5b --- /dev/null +++ b/Others/glassfish/meta.yml @@ -0,0 +1,2 @@ +7.0.23-oe2403sp1: + path: 7.0.23/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Others/image-list.yml b/Others/image-list.yml index d4cd33d88d6d7928c072cbd543ec65a98b4e3740..4187bd03c536f5eef12d227bb21fc18df799c495 100644 --- a/Others/image-list.yml +++ b/Others/image-list.yml @@ -20,3 +20,5 @@ images: conda: conda pypi: pypi npmjs: npmjs + glassfish: glassfish + jenkins: jenkins diff --git a/Others/jenkins/2.502/24.03-lts-sp1/Dockerfile b/Others/jenkins/2.502/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..e4589f0949136e9232f2d359d09b08d200a88ed3 --- /dev/null +++ b/Others/jenkins/2.502/24.03-lts-sp1/Dockerfile @@ -0,0 +1,20 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG TARGETARCH +ARG VERSION=2.502 +ARG MAVEN_VERSION=3.9.6 +ARG MAVEN_URL=https://repo.huaweicloud.com/apache/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz + +RUN yum install -y java-17-openjdk-devel git && \ + yum clean all && \ + curl -fSL -o apache-maven.tar.gz ${MAVEN_URL} && \ + mkdir -p /usr/local/maven && \ + tar -zxf apache-maven.tar.gz -C /usr/local/maven --strip-components=1 && \ + rm -rf apache-maven.tar.gz + +ENV PATH=$PATH:/usr/local/maven/bin + +RUN git clone -b jenkins-${VERSION} https://github.com/jenkinsci/jenkins.git && \ + cd jenkins && \ + mvn clean install -Dmaven.test.skip=true diff --git a/Others/jenkins/meta.yml b/Others/jenkins/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..adc7799be388331e6652c0a2ac96bb22fa5108e8 --- /dev/null +++ b/Others/jenkins/meta.yml @@ -0,0 +1,2 @@ +2.502-oe2403sp1: + path: 2.502/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/Dockerfile b/Storage/alluxio/2.9.4/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..cd3a3c74439fc76a6a2f27305c0adcdf9f5b5223 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/Dockerfile @@ -0,0 +1,26 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + + +ARG VERSION="2.9.4" +ENV ALLUXIO_HOME=/opt/alluxio \ + ALLUXIO_CONF_DIR=/etc/alluxio \ + ALLUXIO_LOGS_DIR=/var/log/alluxio + +COPY entrypoint.sh /opt/docker/bin/ +COPY libexec/ /opt/docker/libexec/ +COPY conf/ ${ALLUXIO_CONF_DIR} + +WORKDIR ${ALLUXIO_HOME} +RUN yum install -y java-1.8.0-openjdk-devel hostname +RUN curl -fSL -o /tmp/alluxio.tar.gz https://downloads.alluxio.io/downloads/files/${VERSION}/alluxio-${VERSION}-bin.tar.gz +RUN tar -zxf /tmp/alluxio.tar.gz --strip-components=1 && \ + chown -R root:root ${ALLUXIO_HOME} && \ + mkdir -p ${ALLUXIO_CONF_DIR} && \ + mkdir -p ${ALLUXIO_LOGS_DIR} && \ + chmod 755 /opt/docker/bin/entrypoint.sh + +ENV PATH=$PATH:${ALLUXIO_HOME}/bin + +ENTRYPOINT ["/opt/docker/bin/entrypoint.sh"] +CMD ["bash"] \ No newline at end of file diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/alluxio-env.sh b/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/alluxio-env.sh new file mode 100644 index 
0000000000000000000000000000000000000000..ca0bc8d31be47eaa5c97dfa67aa8f6d1abbf7b31 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/alluxio-env.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# +# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 +# (the "License"). You may not use this work except in compliance with the License, which is +# available at www.apache.org/licenses/LICENSE-2.0 +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied, as more fully set forth in the License. +# +# See the NOTICE file distributed with this work for information regarding copyright ownership. +# + +# Copy this file as alluxio-env.sh and edit it to configure Alluxio for your +# site. This file is sourced to launch Alluxio servers or use Alluxio shell +# commands. +# +# This file is an optional approach to configure Alluxio options by setting the +# following listed environment variables. Note that, setting this file will not +# affect jobs (e.g., Spark job or MapReduce job) that are using Alluxio client +# as a library. Alternatively, it is recommended to create alluxio-site.properties file, +# which supports all the configuration options provided by Alluxio +# (http://www.alluxio.org/documentation/en/Configuration-Settings.html), +# and is respected by both external jobs and Alluxio servers (or shell). + +# The directory where Alluxio deployment is installed. (Default: the parent directory of libexec/). +ALLUXIO_HOME=/opt/alluxio + +# The directory where log files are stored. (Default: ${ALLUXIO_HOME}/logs). +# ALLUXIO_LOGS_DIR + +# Hostname of the master. +# ALLUXIO_MASTER_HOSTNAME + +# This is now deprecated. Support will be removed in v2.0 +# ALLUXIO_MASTER_ADDRESS + +# The directory where a worker stores in-memory data. (Default: /mnt/ramdisk). +# E.g. On linux, /mnt/ramdisk for ramdisk, /dev/shm for tmpFS; on MacOS, /Volumes/ramdisk for ramdisk +# ALLUXIO_RAM_FOLDER + +# Address of the under filesystem address. (Default: ${ALLUXIO_HOME}/underFSStorage) +# E.g. "/my/local/path" to use local fs, "hdfs://localhost:9000/alluxio" to use a local hdfs +ALLUXIO_UNDERFS_ADDRESS=/mnt/journal + +# How much memory to use per worker. (Default: 1GB) +# E.g. "1000MB", "2GB" +# ALLUXIO_WORKER_MEMORY_SIZE + +# Config properties set for Alluxio master, worker and shell. (Default: "") +# E.g. "-Dalluxio.master.port=39999" +# ALLUXIO_JAVA_OPTS + +# Config properties set for Alluxio master daemon. (Default: "") +# E.g. "-Dalluxio.master.port=39999" +# ALLUXIO_MASTER_JAVA_OPTS + +# Config properties set for Alluxio worker daemon. (Default: "") +# E.g. "-Dalluxio.worker.port=49999" to set worker port, "-Xms2048M -Xmx2048M" to limit the heap size of worker. +# ALLUXIO_WORKER_JAVA_OPTS + +# Config properties set for Alluxio shell. (Default: "") +# E.g. 
"-Dalluxio.user.file.writetype.default=CACHE_THROUGH" +# ALLUXIO_USER_JAVA_OPTS \ No newline at end of file diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/core-site.xml b/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/core-site.xml new file mode 100644 index 0000000000000000000000000000000000000000..636817df02db483e1a5036a4e08ac3506b4367a0 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/core-site.xml @@ -0,0 +1,127 @@ + + + + fs.swift.impl + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem + + + + + fs.swift.service.dal05.auth.url + https://dal05.objectstorage.softlayer.net/auth/v1.0 + + + fs.swift.service.dal05.http.port + 8080 + + + fs.swift.service.dal05.public + true + + + fs.swift.service.dal05.location-aware + false + + + fs.swift.service.ibm.dal05.endpoint.prefix + endpoints + + + fs.swift.service.dal05.apikey + API_KEY + + + fs.swift.service.dal05.username + ACCOUNT:USER NAME + + + fs.swift.service.dal05.use.get.auth + true + + + + + fs.swift.service.swift1.location-aware + false + + + fs.swift.service.swift1.auth.url + http://127.0.0.1:5000/v2.0/tokens + + + fs.swift.service.swift1.http.port + 8080 + + + fs.swift.service.swift1.region + RegionOne + + + fs.swift.service.swift1.public + true + + + fs.swift.service.swift1.auth.endpoint.prefix + endpoints + + + fs.swift.service.swift1.tenant + TENANT + + + fs.swift.service.swift1.password + PASSWORD + + + fs.swift.service.swift1.username + USER NAME + + + + + fs.swift.service.swift2.auth.url + http://127.0.0.1:8080/auth/v1.0 + + + fs.swift.service.swift2.http.port + 8080 + + + fs.swift.service.swift2.public + true + + + fs.swift.service.swift2.location-aware + false + + + fs.swift.service.swift2.endpoint.prefix + endpoints + + + fs.swift.service.swift2.apikey + testing + + + fs.swift.service.swift2.username + test:tester + + + fs.swift.service.swift2.use.get.auth + true + + \ No newline at end of file diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/log4j.properties b/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/log4j.properties new file mode 100644 index 0000000000000000000000000000000000000000..962e90598c604cf8201aac9d9f63e249fbc38f91 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/conf/log4j.properties @@ -0,0 +1,53 @@ +# +# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 +# (the "License"). You may not use this work except in compliance with the License, which is +# available at www.apache.org/licenses/LICENSE-2.0 +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied, as more fully set forth in the License. +# +# See the NOTICE file distributed with this work for information regarding copyright ownership. 
+# + +# May get overridden by System Property + +log4j.rootLogger=INFO, ${alluxio.logger.type} + +log4j.appender.Console=org.apache.log4j.ConsoleAppender +log4j.appender.Console.Target=System.out +log4j.appender.Console.layout=org.apache.log4j.PatternLayout +log4j.appender.Console.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} (%F:%M) - %m%n + +# Appender for Master +log4j.appender.MASTER_LOGGER=org.apache.log4j.ConsoleAppender +log4j.appender.MASTER_LOGGER.Target=System.out +log4j.appender.MASTER_LOGGER.layout=org.apache.log4j.PatternLayout +log4j.appender.MASTER_LOGGER.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M) - %m%n + +# Appender for Proxy +log4j.appender.PROXY_LOGGER=org.apache.log4j.ConsoleAppender +log4j.appender.PROXY_LOGGER.Target=System.out +log4j.appender.PROXY_LOGGER.layout=org.apache.log4j.PatternLayout +log4j.appender.PROXY_LOGGER.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M) - %m%n + +# Appender for Workers +log4j.appender.WORKER_LOGGER=org.apache.log4j.ConsoleAppender +log4j.appender.WORKER_LOGGER.Target=System.out +log4j.appender.WORKER_LOGGER.layout=org.apache.log4j.PatternLayout +log4j.appender.WORKER_LOGGER.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M) - %m%n + +# Appender for User +log4j.appender.USER_LOGGER=org.apache.log4j.RollingFileAppender +log4j.appender.USER_LOGGER.File=${alluxio.logs.dir}/user_${user.name}.log +log4j.appender.USER_LOGGER.MaxFileSize=10MB +log4j.appender.USER_LOGGER.MaxBackupIndex=10 +log4j.appender.USER_LOGGER.layout=org.apache.log4j.PatternLayout +log4j.appender.USER_LOGGER.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M) - %m%n + +# Appender for Fuse +log4j.appender.FUSE_LOGGER=org.apache.log4j.RollingFileAppender +log4j.appender.FUSE_LOGGER.File=${alluxio.logs.dir}/fuse.log +log4j.appender.FUSE_LOGGER.MaxFileSize=10MB +log4j.appender.FUSE_LOGGER.MaxBackupIndex=10 +log4j.appender.FUSE_LOGGER.layout=org.apache.log4j.PatternLayout +log4j.appender.FUSE_LOGGER.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M) - %m%n \ No newline at end of file diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/entrypoint.sh b/Storage/alluxio/2.9.4/24.03-lts-sp1/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..5b594a74348480d799955cb21a90b76a7d9a4e38 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/entrypoint.sh @@ -0,0 +1,89 @@ +#!/bin/bash +set -eo pipefail + +# Setup environment +source /opt/docker/libexec/alluxio-init.sh + + +get_env() { + BIN=$ALLUXIO_HOME/bin + ALLUXIO_LIBEXEC_DIR=${ALLUXIO_LIBEXEC_DIR:-$ALLUXIO_HOME/libexec} + . ${ALLUXIO_LIBEXEC_DIR}/alluxio-config.sh + CLASSPATH=${ALLUXIO_CLIENT_CLASSPATH} +} + + +start_worker() { + CLASSPATH=${ALLUXIO_SERVER_CLASSPATH} + + alluxio-mount.sh Mount + MOUNT_FAILED=$? + + if [ ${MOUNT_FAILED} -ne 0 ] ; then + echo "Mount failed, not starting worker" >&2 + exit 1 + fi + + if [[ -z ${ALLUXIO_WORKER_JAVA_OPTS} ]]; then + ALLUXIO_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS} + fi + + echo "Starting worker @ $(hostname -f)" + ${JAVA} -cp ${CLASSPATH} ${ALLUXIO_WORKER_JAVA_OPTS} alluxio.worker.AlluxioWorker 2>&1 +} + + +start_master() { + CLASSPATH=${ALLUXIO_SERVER_CLASSPATH} + + if [[ -z ${ALLUXIO_MASTER_JAVA_OPTS} ]]; then + ALLUXIO_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS} + fi + + if [ ! 
-d ${ALLUXIO_MASTER_JOURNAL_FOLDER}/BlockMaster ]; then + mkdir -p ${ALLUXIO_MASTER_JOURNAL_FOLDER} + alluxio format + fi + + echo "Starting master @ $(hostname -f)" + ${JAVA} -cp ${CLASSPATH} ${ALLUXIO_MASTER_JAVA_OPTS} alluxio.master.AlluxioMaster 2>&1 +} + + +start_proxy() { + CLASSPATH=${ALLUXIO_SERVER_CLASSPATH} + + if [[ -z ${ALLUXIO_PROXY_JAVA_OPTS} ]]; then + ALLUXIO_PROXY_JAVA_OPTS=${ALLUXIO_JAVA_OPTS} + fi + + echo "Starting proxy @ $(hostname -f)" + ${JAVA} -cp ${CLASSPATH} ${ALLUXIO_PROXY_JAVA_OPTS} alluxio.proxy.AlluxioProxy 2>&1 +} + + +main() { + # get environment + get_env + + # ensure log/data dirs + #ensure_dirs + + case "$1" in + master) + start_master + ;; + proxy) + start_proxy + ;; + worker) + start_worker + ;; + *) + exec $@ + exit $? + ;; + esac +} + +main "$@" \ No newline at end of file diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/libexec/alluxio-init.sh b/Storage/alluxio/2.9.4/24.03-lts-sp1/libexec/alluxio-init.sh new file mode 100644 index 0000000000000000000000000000000000000000..2dd23dc6cb3ba3a5886f02e48422ff00d1428243 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/libexec/alluxio-init.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -eo pipefail + +source /opt/docker/libexec/alluxio-vars.sh diff --git a/Storage/alluxio/2.9.4/24.03-lts-sp1/libexec/alluxio-vars.sh b/Storage/alluxio/2.9.4/24.03-lts-sp1/libexec/alluxio-vars.sh new file mode 100644 index 0000000000000000000000000000000000000000..a012562a61a5608aa85dfd8c80be1a499cd7b273 --- /dev/null +++ b/Storage/alluxio/2.9.4/24.03-lts-sp1/libexec/alluxio-vars.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# The directory where log files are stored. (Default: ${ALLUXIO_HOME}/logs). +# ALLUXIO_LOGS_DIR + +# The directory where a worker stores in-memory data. (Default: /mnt/ramdisk). +# E.g. On linux, /mnt/ramdisk for ramdisk, /dev/shm for tmpFS; on MacOS, /Volumes/ramdisk for ramdisk +export ALLUXIO_RAM_FOLDER=${ALLUXIO_RAM_FOLDER=/mnt/ramdisk} + +# Address of the under filesystem address. (Default: ${ALLUXIO_HOME}/underFSStorage) +# E.g. "/my/local/path" to use local fs, "hdfs://localhost:9000/alluxio" to use a local hdfs +export ALLUXIO_UNDERFS_ADDRESS=${ALLUXIO_UNDERFS_ADDRESS=${ALLUXIO_HOME}/underFSStorage} + +# How much memory to use per worker. (Default: 1GB) +# E.g. "1000MB", "2GB" +export ALLUXIO_WORKER_MEMORY_SIZE=${ALLUXIO_WORKER_MEMORY_SIZE=1GB} + +# Config properties set for Alluxio master, worker and shell. (Default: "") +# E.g. "-Dalluxio.master.port=39999" +# ALLUXIO_JAVA_OPTS + +# Config properties set for Alluxio master daemon. (Default: "") +# E.g. "-Dalluxio.master.port=39999" +# ALLUXIO_MASTER_JAVA_OPTS + +# Config properties set for Alluxio worker daemon. (Default: "") +# E.g. "-Dalluxio.worker.port=49999" to set worker port, "-Xms2048M -Xmx2048M" to limit the heap size of worker. +# ALLUXIO_WORKER_JAVA_OPTS + +# Config properties set for Alluxio shell. (Default: "") +# E.g. "-Dalluxio.user.file.writetype.default=CACHE_THROUGH" +# ALLUXIO_USER_JAVA_OPTS + + +# Hostname of the master. 
+export ALLUXIO_MASTER_HOSTNAME=${ALLUXIO_MASTER_HOSTNAME=localhost} +export ALLUXIO_MASTER_PORT=${ALLUXIO_MASTER_PORT=19998} +export ALLUXIO_MASTER_WEB_PORT=${ALLUXIO_MASTER_WEB_PORT=19999} +export ALLUXIO_MASTER_JOURNAL_FOLDER=${ALLUXIO_MASTER_JOURNAL_FOLDER=/mnt/journal} + +# Hostname of the worker +export ALLUXIO_WORKER_HOSTNAME=${ALLUXIO_WORKER_HOSTNAME=$(hostname -f)} +export ALLUXIO_WORKER_PORT=${ALLUXIO_WORKER_PORT=29998} +export ALLUXIO_WORKER_WEB_PORT=${ALLUXIO_WORKER_WEB_PORT=30000} +export ALLUXIO_WORKER_DATA_PORT=${ALLUXIO_WORKER_DATA_PORT=29999} +export ALLUXIO_WORKER_FOLDER=${ALLUXIO_WORKER_FOLDER=alluxio} + +# S3 properties +export S3_PROXY_HOST=${S3_PROXY_HOST} +export S3_PROXY_PORT=${S3_PROXY_PORT=-1} +export S3_PROXY_USE_HTTPS=${S3_PROXY_USE_HTTPS=false} +export S3_ENDPOINT=${S3_ENDPOINT} +export S3_ENDPOINT_HTTP_PORT=${S3_ENDPOINT_HTTP_PORT=80} +export S3_ENDPOINT_HTTPS_PORT=${S3_ENDPOINT_HTTPS_PORT=443} + +export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} +export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \ No newline at end of file diff --git a/Storage/alluxio/meta.yml b/Storage/alluxio/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..012f5a1c4fd238f319d8840768fe62f69c6adbf0 --- /dev/null +++ b/Storage/alluxio/meta.yml @@ -0,0 +1,2 @@ +2.9.4-oe2403sp1: + path: 2.9.4/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Storage/arrow/19.0.1/24.03-lts-sp1/Dockerfile b/Storage/arrow/19.0.1/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..65272a256e6cd62c4de4a3b4a90d43802fc08a15 --- /dev/null +++ b/Storage/arrow/19.0.1/24.03-lts-sp1/Dockerfile @@ -0,0 +1,20 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG VERSION=19.0.1 + +RUN yum install -y git gcc gcc-c++ make cmake wget && \ + yum clean all + +RUN git clone -b maint-${VERSION} https://github.com/apache/arrow.git && \ + mkdir -p /arrow/cpp/release + +WORKDIR /arrow/cpp/release +RUN cmake .. 
&& \ + make -j$(nproc) \ + && make install DESTDIR=/usr/local + +RUN rm -rf /arrow + +WORKDIR /usr/local +CMD ["/bin/bash"] \ No newline at end of file diff --git a/Storage/arrow/meta.yml b/Storage/arrow/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..a364baa946fba4c91333af2235695729d96b55b4 --- /dev/null +++ b/Storage/arrow/meta.yml @@ -0,0 +1,2 @@ +19.0.1-oe2403sp1: + path: 19.0.1/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Storage/cubefs/3.5.0/24.03-lts-sp1/Dockerfile b/Storage/cubefs/3.5.0/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..9c433f4a5f2e488b620efdb3dfccb535eb5462ab --- /dev/null +++ b/Storage/cubefs/3.5.0/24.03-lts-sp1/Dockerfile @@ -0,0 +1,26 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} AS builder + +ARG TARGETARCH +ARG VERSION=3.5.0 +ARG GO_VERSION=1.17 + +WORKDIR /app +RUN yum install -y git make cmake g++ maven && \ + yum clean all && \ + curl -fSL -o go.tar.gz https://golang.google.cn/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz && \ + tar -xvf go.tar.gz -C /usr/local && \ + rm -rf go.tar.gz +ENV PATH="/usr/local/go/bin:${PATH}" + +RUN git clone -b v${VERSION} https://github.com/cubefs/cubefs.git && \ + cd cubefs && \ + make + + +FROM ${BASE} + +COPY --from=builder /usr/local/go /usr/local/go +COPY --from=builder /app/cubefs/build/bin/* /usr/local/bin/ + +ENV PATH="/usr/local/go/bin:${PATH}" \ No newline at end of file diff --git a/Storage/cubefs/meta.yml b/Storage/cubefs/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..a6810f6be0835f8db6b678043bd32ee8e4e03b49 --- /dev/null +++ b/Storage/cubefs/meta.yml @@ -0,0 +1,2 @@ +3.5.0-oe2403sp1: + path: 3.5.0/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Storage/drill/1.21.2/24.03-lts-sp1/Dockerfile b/Storage/drill/1.21.2/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3f5dcc8629fa734a35b784392d290ea43c043e0e --- /dev/null +++ b/Storage/drill/1.21.2/24.03-lts-sp1/Dockerfile @@ -0,0 +1,37 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG VERSION=1.21.2 + + +ARG PACKAGE=apache-drill-${VERSION}.tar.gz +ARG PACKAGE_RUL=https://dlcdn.apache.org/drill/${VERSION}/${PACKAGE} + +ENV DRILL_HEAP=900M +ENV ZOOKEEPER_HOST=zookeeper +ENV PATH $PATH:/apache-drill/bin:/zookeeper/bin + +WORKDIR / +RUN dnf install -y java-1.8.0-openjdk-devel supervisor which && \ + yum clean all && \ + mkdir -p /etc/supervisor.d + +RUN curl -fSL -o "${PACKAGE}" "$PACKAGE_RUL" && \ + tar zxf "${PACKAGE}" && \ + test -d "apache-drill-${VERSION}" && \ + rm -fv "${PACKAGE}" && \ + ln -sv "apache-drill-${VERSION}" apache-drill + +COPY entrypoint.sh / +COPY drill.ini /etc/supervisor.d/ +RUN sed -i -e "s/-Xms1G/-Xms\$DRILL_MAX_HEAP/" apache-drill/conf/drill-env.sh && \ + sed -i -e "s/^DRILL_MAX_HEAP=.*/DRILL_MAX_HEAP=\"${DRILL_HEAP}\"/" apache-drill/conf/drill-env.sh && \ + sed -i -e "s/^DRILL_HEAP=.*/DRILL_HEAP=\"${DRILL_HEAP}\"/" apache-drill/conf/drill-env.sh && \ + sed -i -e "s/^\([[:space:]]*\)zk.connect:.*/\\1zk.connect: \"${ZOOKEEPER_HOST}\"/" apache-drill/conf/drill-override.conf && \ + chmod 755 /entrypoint.sh + + + +EXPOSE 8047 + +CMD ["/entrypoint.sh"] \ No newline at end of file diff --git a/Storage/drill/1.21.2/24.03-lts-sp1/drill.ini b/Storage/drill/1.21.2/24.03-lts-sp1/drill.ini new file mode 100644 index 0000000000000000000000000000000000000000..eb655ac9e59f676cac65634b73be8c71df28db8c --- /dev/null +++ 
b/Storage/drill/1.21.2/24.03-lts-sp1/drill.ini @@ -0,0 +1,2 @@ +[program:drill] +command=sh -c '/apache-drill/bin/drillbit.sh start && sleep 2 && tail -f /dev/null /apache-drill/log/*' \ No newline at end of file diff --git a/Storage/drill/1.21.2/24.03-lts-sp1/entrypoint.sh b/Storage/drill/1.21.2/24.03-lts-sp1/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..14d57b172b0b32818e7a9ac3517585380e7069fd --- /dev/null +++ b/Storage/drill/1.21.2/24.03-lts-sp1/entrypoint.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -euo pipefail +[ -n "${DEBUG:-}" ] && set -x + +export JAVA_HOME="${JAVA_HOME:-/usr}" +export DRILL_HEAP="${DRILL_HEAP:-900M}" +export ZOOKEEPER_HOST="${ZOOKEEPER_HOST:-zookeeper}" + +sed -i -e "s/-Xms1G/-Xms\$DRILL_MAX_HEAP/" apache-drill/conf/drill-env.sh +sed -i -e "s/^DRILL_MAX_HEAP=.*/DRILL_MAX_HEAP=\"${DRILL_HEAP}\"/" apache-drill/conf/drill-env.sh + +sed -i -e "s/^DRILL_HEAP=.*/DRILL_HEAP=\"${DRILL_HEAP}\"/" apache-drill/conf/drill-env.sh +sed -i -e "s/^\\([[:space:]]*\\)zk.connect:.*/\\1zk.connect: \"${ZOOKEEPER_HOST}\"/" apache-drill/conf/drill-override.conf + +if [ -t 0 ]; then + sqlline -u jdbc:drill:zk=local +else + echo " +Running non-interactively, will not open Apache Drill SQL shell + +For Apache Drill shell start this image with 'docker run -t -i' switches + +Otherwise you will need to have a separate ZooKeeper container linked (one is available from harisekhon/zookeeper) and specify: + +docker run -e ZOOKEEPER_HOST=:2181 supervisord -n +" +fi \ No newline at end of file diff --git a/Storage/drill/meta.yml b/Storage/drill/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..86e897668302bd181b0dc74a0ad94885f2ef4465 --- /dev/null +++ b/Storage/drill/meta.yml @@ -0,0 +1,2 @@ +1.21.2-oe2403sp1: + path: 1.21.2/24.03-lts-sp1/Dockerfile \ No newline at end of file diff --git a/Storage/image-list.yml b/Storage/image-list.yml index 3622e70b991a4d166c2513a3960301de937ac47c..5559f1e18286495def1c9c0b5116a740225e935a 100644 --- a/Storage/image-list.yml +++ b/Storage/image-list.yml @@ -1,3 +1,8 @@ images: daos: daos - kudu: kudu \ No newline at end of file + kudu: kudu + alluxio: alluxio + arrow: arrow + cubefs: cubefs + drill: drill + lizardfs: lizardfs \ No newline at end of file diff --git a/Storage/lizardfs/3.12.0/24.03-lts-sp1/Dockerfile b/Storage/lizardfs/3.12.0/24.03-lts-sp1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..f0381ba03ca74629d08afa4a37fd3eca6c08dc68 --- /dev/null +++ b/Storage/lizardfs/3.12.0/24.03-lts-sp1/Dockerfile @@ -0,0 +1,17 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} + +ARG VERSION=3.12.0 + +RUN yum install -y \ + fmt fmt-devel git gcc gcc-c++ fuse-devel cmake pkgconfig unzip spdlog-devel \ + zlib-devel asciidoc Judy-devel libdb-devel boost-devel pam-devel make && \ + yum clean all + +RUN git clone https://github.com/lizardfs/lizardfs.git +WORKDIR /lizardfs +RUN sed -i "s/fsync(ctx, fileinfo)/fsync(ctx, fileinfo, ec)/g" src/mount/client/client.cc && \ + sed -i "s/readlink(ctx, inode)/readlink(ctx, inode, ec)/g" src/mount/client/client.cc && \ + ./configure && \ + make -j4 && \ + make install \ No newline at end of file diff --git a/Storage/lizardfs/meta.yml b/Storage/lizardfs/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..64d7f797c231329bc6cbb5695384d6118b453467 --- /dev/null +++ b/Storage/lizardfs/meta.yml @@ -0,0 +1,2 @@ +3.12.0-oe2403sp1: + path: 3.12.0/24.03-lts-sp1/Dockerfile \ No newline at end of file
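The images added above follow the repository layout: each component's meta.yml maps an image tag (for example 3.12.0-oe2403sp1) to the Dockerfile that builds it, and the Dockerfiles that download per-architecture artifacts rely on the TARGETARCH build argument that BuildKit injects. A minimal build-and-run sketch for one of the new images, assuming a checkout of this repository and Docker with buildx available (the openeuler/istio image name and tag are illustrative, not defined by these patches):

# Build the istio image for amd64; buildx derives TARGETARCH from --platform.
docker buildx build \
  --platform linux/amd64 \
  -f Cloud/istio/1.25.1/24.03-lts-sp1/Dockerfile \
  -t openeuler/istio:1.25.1-oe2403sp1 \
  Cloud/istio/1.25.1/24.03-lts-sp1

# Smoke test: the image's default CMD runs "istioctl version".
docker run --rm openeuler/istio:1.25.1-oe2403sp1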